356 files changed, 91474 insertions, 259 deletions
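For context before the per-file hunks: among many other changes, this series adds two new cpuacct cgroup files, cpuacct.cpufreq (CPU time in nanoseconds spent at each CPU frequency) and cpuacct.power (CPU power consumed in milliwatt-seconds), documented in the Documentation/cgroups/cpuacct.txt change below. A minimal userspace sketch for reading them follows; the /acct mount point and the use of the root cgroup are illustrative assumptions, not something this patch establishes.

/* Illustrative sketch only, not part of the patch: dump the cpuacct.cpufreq
 * and cpuacct.power files added by this series for one cgroup directory.
 * The /acct mount point is an assumed convention. */
#include <stdio.h>

static void dump_file(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	printf("%s:\n", path);
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* CPU time (ns) spent at each frequency; needs platform hooks */
	dump_file("/acct/cpuacct.cpufreq");
	/* CPU power consumed (mW*s); needs platform power callbacks */
	dump_file("/acct/cpuacct.power");
	return 0;
}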
diff --git a/Documentation/android.txt b/Documentation/android.txt new file mode 100644 index 000000000000..72a62afdf202 --- /dev/null +++ b/Documentation/android.txt @@ -0,0 +1,121 @@ + ============= + A N D R O I D + ============= + +Copyright (C) 2009 Google, Inc. +Written by Mike Chan <mike@android.com> + +CONTENTS: +--------- + +1. Android + 1.1 Required enabled config options + 1.2 Required disabled config options + 1.3 Recommended enabled config options +2. Contact + + +1. Android +========== + +Android (www.android.com) is an open source operating system for mobile devices. +This document describes configurations needed to run the Android framework on +top of the Linux kernel. + +To see a working defconfig look at msm_defconfig or goldfish_defconfig +which can be found at http://android.git.kernel.org in kernel/common.git +and kernel/msm.git + + +1.1 Required enabled config options +----------------------------------- +After building a standard defconfig, ensure that these options are enabled in +your .config or defconfig if they are not already. Based on the msm_defconfig. +You should keep the rest of the default options enabled in the defconfig +unless you know what you are doing. + +ANDROID_PARANOID_NETWORK +ASHMEM +CONFIG_FB_MODE_HELPERS +CONFIG_FONT_8x16 +CONFIG_FONT_8x8 +CONFIG_YAFFS_SHORT_NAMES_IN_RAM +DAB +EARLYSUSPEND +FB +FB_CFB_COPYAREA +FB_CFB_FILLRECT +FB_CFB_IMAGEBLIT +FB_DEFERRED_IO +FB_TILEBLITTING +HIGH_RES_TIMERS +INOTIFY +INOTIFY_USER +INPUT_EVDEV +INPUT_GPIO +INPUT_MISC +LEDS_CLASS +LEDS_GPIO +LOCK_KERNEL +LOGGER +LOW_MEMORY_KILLER +MISC_DEVICES +NEW_LEDS +NO_HZ +POWER_SUPPLY +PREEMPT +RAMFS +RTC_CLASS +RTC_LIB +SWITCH +SWITCH_GPIO +TMPFS +UID_STAT +UID16 +USB_FUNCTION +USB_FUNCTION_ADB +USER_WAKELOCK +VIDEO_OUTPUT_CONTROL +WAKELOCK +YAFFS_AUTO_YAFFS2 +YAFFS_FS +YAFFS_YAFFS1 +YAFFS_YAFFS2 + + +1.2 Required disabled config options +------------------------------------ +CONFIG_YAFFS_DISABLE_LAZY_LOAD +DNOTIFY + + +1.3 Recommended enabled config options +-------------------------------------- +ANDROID_PMEM +ANDROID_RAM_CONSOLE +ANDROID_RAM_CONSOLE_ERROR_CORRECTION +SCHEDSTATS +DEBUG_PREEMPT +DEBUG_MUTEXES +DEBUG_SPINLOCK_SLEEP +DEBUG_INFO +FRAME_POINTER +CPU_FREQ +CPU_FREQ_TABLE +CPU_FREQ_DEFAULT_GOV_ONDEMAND +CPU_FREQ_GOV_ONDEMAND +CRC_CCITT +EMBEDDED +INPUT_TOUCHSCREEN +I2C +I2C_BOARDINFO +LOG_BUF_SHIFT=17 +SERIAL_CORE +SERIAL_CORE_CONSOLE + + +2. Contact +========== +website: http://android.git.kernel.org + +mailing-lists: android-kernel@googlegroups.com diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt index 8b930946c52a..84e471b612eb 100644 --- a/Documentation/cgroups/cpuacct.txt +++ b/Documentation/cgroups/cpuacct.txt @@ -40,6 +40,13 @@ system: Time spent by tasks of the cgroup in kernel mode. user and system are in USER_HZ unit. +cpuacct.cpufreq file gives CPU time (in nanoseconds) spent at each CPU +frequency. Platform hooks must be implemented in order to properly track +time at each CPU frequency. + +cpuacct.power file gives CPU power consumed (in milliWatt seconds). Platform +must provide and implement power callback functions. + cpuacct controller uses percpu_counter interface to collect user and system times.
This has two side effects: diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 6825c34646d4..051b179c9135 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -21,7 +21,7 @@ #if defined(CONFIG_DEBUG_ICEDCC) -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) .macro loadsp, rb, tmp .endm .macro writeb, ch, rb @@ -637,6 +637,8 @@ proc_types: @ b __arm6_mmu_cache_off @ b __armv3_mmu_cache_flush +#if !defined(CONFIG_CPU_V7) + /* This collides with some V7 IDs, preventing correct detection */ .word 0x00000000 @ old ARM ID .word 0x0000f000 mov pc, lr @@ -645,6 +647,7 @@ proc_types: THUMB( nop ) mov pc, lr THUMB( nop ) +#endif .word 0x41007000 @ ARM7/710 .word 0xfff8fe00 diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index e653a6d3c8d9..253ecc88a7bb 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -36,7 +36,7 @@ extern void error(char *x); #ifdef CONFIG_DEBUG_ICEDCC -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) static void icedcc_putc(int ch) { diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 0a34c8186924..1777af96c309 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -41,3 +41,46 @@ config SHARP_SCOOP config COMMON_CLKDEV bool select HAVE_CLK + +config FIQ_GLUE + bool + select FIQ + +config FIQ_DEBUGGER + bool "FIQ Mode Serial Debugger" + select FIQ + select FIQ_GLUE + select KERNEL_DEBUGGER_CORE + default n + help + The FIQ serial debugger can accept commands even when the + kernel is unresponsive due to being stuck with interrupts + disabled. Depends on the kernel debugger core in drivers/misc. + + +config FIQ_DEBUGGER_NO_SLEEP + bool "Keep serial debugger active" + depends on FIQ_DEBUGGER + default n + help + Enables the serial debugger at boot. Passing + fiq_debugger.no_sleep on the kernel commandline will + override this config option. + +config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON + bool "Don't disable wakeup IRQ when debugger is active" + depends on FIQ_DEBUGGER + default n + help + Don't disable the wakeup irq when enabling the uart clock. This will + cause extra interrupts, but it makes the serial debugger usable + on some MSM radio builds that ignore the uart clock request in power + collapse. + +config FIQ_DEBUGGER_CONSOLE + bool "Console on FIQ Serial Debugger port" + depends on FIQ_DEBUGGER + default n + help + Enables a console so that printk messages are displayed on + the debugger serial port as they occur. diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index e6e8664a9413..d986c26ee35b 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -17,3 +17,5 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o obj-$(CONFIG_ARCH_IXP23XX) += uengine.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o +obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o +obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c new file mode 100644 index 000000000000..330053856c76 --- /dev/null +++ b/arch/arm/common/fiq_debugger.c @@ -0,0 +1,898 @@ +/* + * arch/arm/common/fiq_debugger.c + * + * Serial Debugger Interface accessed through an FIQ interrupt. + * + * Copyright (C) 2008 Google, Inc.
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/console.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/kernel_debugger.h> +#include <linux/kernel_stat.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/wakelock.h> + +#include <asm/fiq_debugger.h> +#include <asm/fiq_glue.h> +#include <asm/stacktrace.h> + +#include <mach/system.h> + +#include <linux/uaccess.h> + +#include "fiq_debugger_ringbuf.h" + +#define DEBUG_MAX 64 +#define MAX_UNHANDLED_FIQ_COUNT 1000000 + +#define THREAD_INFO(sp) ((struct thread_info *) \ + ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) + +struct fiq_debugger_state { + struct fiq_glue_handler handler; + + int fiq; + int signal_irq; + int wakeup_irq; + bool wakeup_irq_no_set_wake; + struct clk *clk; + struct fiq_debugger_pdata *pdata; + struct platform_device *pdev; + + char debug_cmd[DEBUG_MAX]; + int debug_busy; + int debug_abort; + + char debug_buf[DEBUG_MAX]; + int debug_count; + + bool no_sleep; + bool debug_enable; + bool ignore_next_wakeup_irq; + struct timer_list sleep_timer; + bool uart_clk_enabled; + struct wake_lock debugger_wake_lock; + bool console_enable; + int current_cpu; + atomic_t unhandled_fiq_count; + bool in_fiq; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + struct console console; + struct tty_driver *tty_driver; + struct tty_struct *tty; + int tty_open_count; + struct fiq_debugger_ringbuf *tty_rbuf; +#endif + + unsigned int last_irqs[NR_IRQS]; +}; + +#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP +static bool initial_no_sleep = true; +#else +static bool initial_no_sleep; +#endif +static bool initial_debug_enable; +static bool initial_console_enable; + +module_param_named(no_sleep, initial_no_sleep, bool, 0644); +module_param_named(debug_enable, initial_debug_enable, bool, 0644); +module_param_named(console_enable, initial_console_enable, bool, 0644); + +#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {} +static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {} +#else +static inline void enable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + enable_irq(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + enable_irq_wake(state->wakeup_irq); +} +static inline void disable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + disable_irq_nosync(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + disable_irq_wake(state->wakeup_irq); +} +#endif + +static void debug_force_irq(struct fiq_debugger_state *state) +{ + unsigned int irq = state->signal_irq; + if (state->pdata->force_irq) + state->pdata->force_irq(state->pdev, irq); + else { + struct irq_chip *chip = get_irq_chip(irq); + if (chip && chip->retrigger) + chip->retrigger(irq); + } +} + +static void debug_uart_flush(struct 
fiq_debugger_state *state) +{ + if (state->pdata->uart_flush) + state->pdata->uart_flush(state->pdev); +} + +static void debug_puts(struct fiq_debugger_state *state, char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + state->pdata->uart_putc(state->pdev, '\r'); + state->pdata->uart_putc(state->pdev, c); + } +} + +static void debug_prompt(struct fiq_debugger_state *state) +{ + debug_puts(state, "debug> "); +} + +int log_buf_copy(char *dest, int idx, int len); +static void dump_kernel_log(struct fiq_debugger_state *state) +{ + char buf[1024]; + int idx = 0; + int ret; + int saved_oip; + + /* setting oops_in_progress prevents log_buf_copy() + * from trying to take a spinlock which will make it + * very unhappy in some cases... + */ + saved_oip = oops_in_progress; + oops_in_progress = 1; + for (;;) { + ret = log_buf_copy(buf, idx, 1023); + if (ret <= 0) + break; + buf[ret] = 0; + debug_puts(state, buf); + idx += ret; + } + oops_in_progress = saved_oip; +} + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +static int debug_printf(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + debug_puts(state, buf); + return state->debug_abort; +} + +/* Safe outside fiq context */ +static int debug_printf_nfiq(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + debug_puts(state, buf); + debug_uart_flush(state); + local_irq_restore(irq_flags); + return state->debug_abort; +} + +static void dump_regs(struct fiq_debugger_state *state, unsigned *regs) +{ + debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs[0], regs[1], regs[2], regs[3]); + debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs[4], regs[5], regs[6], regs[7]); + debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs[8], regs[9], regs[10], regs[11], + mode_name(regs[16])); + if ((regs[16] & MODE_MASK) == USR_MODE) + debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " + "cpsr %08x\n", regs[12], regs[13], regs[14], + regs[15], regs[16]); + else + debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " + "cpsr %08x spsr %08x\n", regs[12], regs[13], + regs[14], regs[15], regs[16], regs[17]); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, 
r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs) +{ + struct mode_regs mode_regs; + dump_regs(state, regs); + get_mode_regs(&mode_regs); + debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + debug_printf(state, " und: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x " + "r12 %08x\n", + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +static void dump_irqs(struct fiq_debugger_state *state) +{ + int n; + debug_printf(state, "irqnr total since-last status name\n"); + for (n = 0; n < NR_IRQS; n++) { + struct irqaction *act = irq_desc[n].action; + if (!act && !kstat_irqs(n)) + continue; + debug_printf(state, "%5d: %10u %11u %8x %s\n", n, + kstat_irqs(n), + kstat_irqs(n) - state->last_irqs[n], + irq_desc[n].status, + (act && act->name) ? act->name : "???"); + state->last_irqs[n] = kstat_irqs(n); + } +} + +struct stacktrace_state { + struct fiq_debugger_state *state; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + debug_printf(sts->state, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + sts->depth--; + return 0; + } + debug_printf(sts->state, " ...\n"); + + return sts->depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct fiq_debugger_state *state, + struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + debug_printf(state, " invalid frame pointer %p\n", tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + debug_printf(state, + " failed to copy frame pointer %p\n", tail); + return NULL; + } + + debug_printf(state, " %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void dump_stacktrace(struct fiq_debugger_state *state, + struct pt_regs * const regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.state = state; + *current_thread_info() = *real_thread_info; + + if (!current) + debug_printf(state, "current NULL\n"); + else + 
debug_printf(state, "pid: %d comm: %s\n", + current->pid, current->comm); + dump_regs(state, (unsigned *)regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + debug_printf(state, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &sts); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(state, tail); +} + +static void debug_exec(struct fiq_debugger_state *state, + const char *cmd, unsigned *regs, void *svc_sp) +{ + if (!strcmp(cmd, "pc")) { + debug_printf(state, " pc %08x cpsr %08x mode %s\n", + regs[15], regs[16], mode_name(regs[16])); + } else if (!strcmp(cmd, "regs")) { + dump_regs(state, regs); + } else if (!strcmp(cmd, "allregs")) { + dump_allregs(state, regs); + } else if (!strcmp(cmd, "bt")) { + dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp); + } else if (!strcmp(cmd, "reboot")) { + arch_reset(0, 0); + } else if (!strcmp(cmd, "irqs")) { + dump_irqs(state); + } else if (!strcmp(cmd, "kmsg")) { + dump_kernel_log(state); + } else if (!strcmp(cmd, "version")) { + debug_printf(state, "%s\n", linux_banner); + } else if (!strcmp(cmd, "sleep")) { + state->no_sleep = false; + } else if (!strcmp(cmd, "nosleep")) { + state->no_sleep = true; + } else if (!strcmp(cmd, "console")) { + state->console_enable = true; + debug_printf(state, "console mode\n"); + } else if (!strcmp(cmd, "cpu")) { + debug_printf(state, "cpu %d\n", state->current_cpu); + } else if (!strncmp(cmd, "cpu ", 4)) { + unsigned long cpu = 0; + if (strict_strtoul(cmd + 4, 10, &cpu) == 0) + state->current_cpu = cpu; + else + debug_printf(state, "invalid cpu\n"); + debug_printf(state, "cpu %d\n", state->current_cpu); + } else { + if (state->debug_busy) { + debug_printf(state, + "command processor busy. 
trying to abort.\n"); + state->debug_abort = -1; + } else { + strcpy(state->debug_cmd, cmd); + state->debug_busy = 1; + } + + debug_force_irq(state); + + return; + } + if (!state->console_enable) + debug_prompt(state); +} + +static void sleep_timer_expired(unsigned long data) +{ + struct fiq_debugger_state *state = (struct fiq_debugger_state *)data; + + if (state->uart_clk_enabled && !state->no_sleep) { + if (state->debug_enable) { + state->debug_enable = false; + debug_printf_nfiq(state, "suspending fiq debugger\n"); + } + state->ignore_next_wakeup_irq = true; + if (state->clk) + clk_disable(state->clk); + state->uart_clk_enabled = false; + enable_wakeup_irq(state); + } + wake_unlock(&state->debugger_wake_lock); +} + +static irqreturn_t wakeup_irq_handler(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (!state->no_sleep) + debug_puts(state, "WAKEUP\n"); + if (state->ignore_next_wakeup_irq) + state->ignore_next_wakeup_irq = false; + else if (!state->uart_clk_enabled) { + wake_lock(&state->debugger_wake_lock); + if (state->clk) + clk_enable(state->clk); + state->uart_clk_enabled = true; + disable_wakeup_irq(state); + mod_timer(&state->sleep_timer, jiffies + HZ / 2); + } + return IRQ_HANDLED; +} + +static irqreturn_t debug_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + if (state->pdata->force_irq_ack) + state->pdata->force_irq_ack(state->pdev, state->signal_irq); + + if (!state->no_sleep) { + wake_lock(&state->debugger_wake_lock); + mod_timer(&state->sleep_timer, jiffies + HZ * 5); + } +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + if (state->tty) { + int i; + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + for (i = 0; i < count; i++) { + int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, i); + tty_insert_flip_char(state->tty, c, TTY_NORMAL); + if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1)) + pr_warn("fiq tty failed to consume byte\n"); + } + tty_flip_buffer_push(state->tty); + } +#endif + if (state->debug_busy) { + struct kdbg_ctxt ctxt; + + ctxt.printf = debug_printf_nfiq; + ctxt.cookie = state; + kernel_debugger(&ctxt, state->debug_cmd); + debug_prompt(state); + + state->debug_busy = 0; + } + return IRQ_HANDLED; +} + +static int debug_getc(struct fiq_debugger_state *state) +{ + return state->pdata->uart_getc(state->pdev); +} + +static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + int c; + static int last_c; + int count = 0; + unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; + + if (this_cpu != state->current_cpu) { + if (state->in_fiq) + return; + + if (atomic_inc_return(&state->unhandled_fiq_count) != + MAX_UNHANDLED_FIQ_COUNT) + return; + + debug_printf(state, "fiq_debugger: cpu %d not responding, " + "reverting to cpu %d\n", state->current_cpu, + this_cpu); + + atomic_set(&state->unhandled_fiq_count, 0); + state->current_cpu = this_cpu; + return; + } + + state->in_fiq = true; + + while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) { + count++; + if (!state->debug_enable) { + if ((c == 13) || (c == 10)) { + state->debug_enable = true; + state->debug_count = 0; + debug_prompt(state); + } + } else if (c == FIQ_DEBUGGER_BREAK) { + state->console_enable = false; + debug_puts(state, "fiq debugger mode\n"); + state->debug_count = 0; + debug_prompt(state); +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + } else if (state->console_enable && state->tty_rbuf) { + fiq_debugger_ringbuf_push(state->tty_rbuf, c); + 
debug_force_irq(state); +#endif + } else if ((c >= ' ') && (c < 127)) { + if (state->debug_count < (DEBUG_MAX - 1)) { + state->debug_buf[state->debug_count++] = c; + state->pdata->uart_putc(state->pdev, c); + } + } else if ((c == 8) || (c == 127)) { + if (state->debug_count > 0) { + state->debug_count--; + state->pdata->uart_putc(state->pdev, 8); + state->pdata->uart_putc(state->pdev, ' '); + state->pdata->uart_putc(state->pdev, 8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + state->pdata->uart_putc(state->pdev, '\r'); + state->pdata->uart_putc(state->pdev, '\n'); + } + if (state->debug_count) { + state->debug_buf[state->debug_count] = 0; + state->debug_count = 0; + debug_exec(state, state->debug_buf, + regs, svc_sp); + } else { + debug_prompt(state); + } + } + last_c = c; + } + debug_uart_flush(state); + if (state->pdata->fiq_ack) + state->pdata->fiq_ack(state->pdev, state->fiq); + + /* poke sleep timer if necessary */ + if (state->debug_enable && !state->no_sleep) + debug_force_irq(state); + + atomic_set(&state->unhandled_fiq_count, 0); + state->in_fiq = false; +} + +static void debug_resume(struct fiq_glue_handler *h) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + if (state->pdata->uart_resume) + state->pdata->uart_resume(state->pdev); +} + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) +struct tty_driver *debug_console_device(struct console *co, int *index) +{ + struct fiq_debugger_state *state; + state = container_of(co, struct fiq_debugger_state, console); + *index = 0; + return state->tty_driver; +} + +static void debug_console_write(struct console *co, + const char *s, unsigned int count) +{ + struct fiq_debugger_state *state; + + state = container_of(co, struct fiq_debugger_state, console); + + if (!state->console_enable) + return; + + while (count--) { + if (*s == '\n') + state->pdata->uart_putc(state->pdev, '\r'); + state->pdata->uart_putc(state->pdev, *s++); + } + debug_uart_flush(state); +} + +static struct console fiq_debugger_console = { + .name = "ttyFIQ", + .device = debug_console_device, + .write = debug_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; + +int fiq_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct fiq_debugger_state *state = tty->driver->driver_state; + if (state->tty_open_count++) + return 0; + + tty->driver_data = state; + state->tty = tty; + return 0; +} + +void fiq_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct fiq_debugger_state *state = tty->driver_data; + if (--state->tty_open_count) + return; + state->tty = NULL; +} + +int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + struct fiq_debugger_state *state = tty->driver_data; + + if (!state->console_enable) + return count; + + if (state->clk) + clk_enable(state->clk); + for (i = 0; i < count; i++) + state->pdata->uart_putc(state->pdev, *buf++); + if (state->clk) + clk_disable(state->clk); + + return count; +} + +int fiq_tty_write_room(struct tty_struct *tty) +{ + return 1024; +} + +static const struct tty_operations fiq_tty_driver_ops = { + .write = fiq_tty_write, + .write_room = fiq_tty_write_room, + .open = fiq_tty_open, + .close = fiq_tty_close, +}; + +static int fiq_debugger_tty_init(struct fiq_debugger_state *state) +{ + int ret = -EINVAL; + + state->tty_driver = alloc_tty_driver(1); + if (!state->tty_driver) { + pr_err("Failed to allocate fiq debugger tty\n"); + return -ENOMEM; + } + + 
state->tty_driver->owner = THIS_MODULE; + state->tty_driver->driver_name = "fiq-debugger"; + state->tty_driver->name = "ttyFIQ"; + state->tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + state->tty_driver->subtype = SERIAL_TYPE_NORMAL; + state->tty_driver->init_termios = tty_std_termios; + state->tty_driver->init_termios.c_cflag = + B115200 | CS8 | CREAD | HUPCL | CLOCAL; + state->tty_driver->init_termios.c_ispeed = + state->tty_driver->init_termios.c_ospeed = 115200; + state->tty_driver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(state->tty_driver, &fiq_tty_driver_ops); + state->tty_driver->driver_state = state; + + ret = tty_register_driver(state->tty_driver); + if (ret) { + pr_err("Failed to register fiq tty: %d\n", ret); + goto err; + } + + state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); + if (!state->tty_rbuf) { + pr_err("Failed to allocate fiq debugger ringbuf\n"); + ret = -ENOMEM; + goto err; + } + + pr_info("Registered FIQ tty driver %p\n", state->tty_driver); + return 0; + +err: + fiq_debugger_ringbuf_free(state->tty_rbuf); + state->tty_rbuf = NULL; + put_tty_driver(state->tty_driver); + return ret; +} +#endif + +static int fiq_debugger_probe(struct platform_device *pdev) +{ + int ret; + struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fiq_debugger_state *state; + + if (!pdata->uart_getc || !pdata->uart_putc || !pdata->fiq_enable) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + state->handler.fiq = debug_fiq; + state->handler.resume = debug_resume; + setup_timer(&state->sleep_timer, sleep_timer_expired, + (unsigned long)state); + state->pdata = pdata; + state->pdev = pdev; + state->no_sleep = initial_no_sleep; + state->debug_enable = initial_debug_enable; + state->console_enable = initial_console_enable; + + state->fiq = platform_get_irq_byname(pdev, "fiq"); + state->signal_irq = platform_get_irq_byname(pdev, "signal"); + state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); + + if (state->wakeup_irq < 0) + state->no_sleep = true; + state->ignore_next_wakeup_irq = !state->no_sleep; + + wake_lock_init(&state->debugger_wake_lock, + WAKE_LOCK_SUSPEND, "serial-debug"); + + state->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(state->clk)) + state->clk = NULL; + + if (state->clk) + clk_enable(state->clk); + + if (pdata->uart_init) { + ret = pdata->uart_init(pdev); + if (ret) + goto err_uart_init; + } + + debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n", + state->no_sleep ? 
"" : "twice "); + + ret = fiq_glue_register_handler(&state->handler); + if (ret) { + pr_err("serial_debugger: could not install fiq handler\n"); + goto err_register_fiq; + } + + pdata->fiq_enable(pdev, state->fiq, 1); + + if (state->clk) + clk_disable(state->clk); + + ret = request_irq(state->signal_irq, debug_irq, + IRQF_TRIGGER_RISING, "debug", state); + if (ret) + pr_err("serial_debugger: could not install signal_irq"); + + if (state->wakeup_irq >= 0) { + ret = request_irq(state->wakeup_irq, wakeup_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_DISABLED, + "debug-wakeup", state); + if (ret) { + pr_err("serial_debugger: " + "could not install wakeup irq\n"); + state->wakeup_irq = -1; + } else { + ret = enable_irq_wake(state->wakeup_irq); + if (ret) { + pr_err("serial_debugger: " + "could not enable wakeup\n"); + state->wakeup_irq_no_set_wake = true; + } + } + } + if (state->no_sleep) + wakeup_irq_handler(state->wakeup_irq, state); + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + state->console = fiq_debugger_console; + register_console(&state->console); + fiq_debugger_tty_init(state); +#endif + return 0; + +err_register_fiq: + if (pdata->uart_free) + pdata->uart_free(pdev); +err_uart_init: + kfree(state); + if (state->clk) + clk_put(state->clk); + return ret; +} + +static struct platform_driver fiq_debugger_driver = { + .probe = fiq_debugger_probe, + .driver.name = "fiq_debugger", +}; + +static int __init fiq_debugger_init(void) +{ + return platform_driver_register(&fiq_debugger_driver); +} + +postcore_initcall(fiq_debugger_init); diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h new file mode 100644 index 000000000000..2649b5581088 --- /dev/null +++ b/arch/arm/common/fiq_debugger_ringbuf.h @@ -0,0 +1,94 @@ +/* + * arch/arm/common/fiq_debugger_ringbuf.c + * + * simple lockless ringbuffer + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +struct fiq_debugger_ringbuf { + int len; + int head; + int tail; + u8 buf[]; +}; + + +static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) +{ + struct fiq_debugger_ringbuf *rbuf; + + rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); + if (rbuf == NULL) + return NULL; + + rbuf->len = len; + rbuf->head = 0; + rbuf->tail = 0; + smp_mb(); + + return rbuf; +} + +static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf) +{ + kfree(rbuf); +} + +static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) +{ + int level = rbuf->head - rbuf->tail; + + if (level < 0) + level = rbuf->len + level; + + return level; +} + +static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf) +{ + return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1; +} + +static inline u8 +fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i) +{ + return rbuf->buf[(rbuf->tail + i) % rbuf->len]; +} + +static inline int +fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) +{ + count = min(count, fiq_debugger_ringbuf_level(rbuf)); + + rbuf->tail = (rbuf->tail + count) % rbuf->len; + smp_mb(); + + return count; +} + +static inline int +fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) +{ + if (fiq_debugger_ringbuf_room(rbuf) == 0) + return 0; + + rbuf->buf[rbuf->head] = datum; + smp_mb(); + rbuf->head = (rbuf->head + 1) % rbuf->len; + smp_mb(); + + return 1; +} diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S new file mode 100644 index 000000000000..9e3455a09f8f --- /dev/null +++ b/arch/arm/common/fiq_glue.S @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + + .text + + .global fiq_glue_end + + /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ + +ENTRY(fiq_glue) + /* store pc, cpsr from previous mode */ + mrs r12, spsr + sub r11, lr, #4 + subs r10, #1 + bne nested_fiq + + stmfd sp!, {r11-r12, lr} + + /* store r8-r14 from previous mode */ + sub sp, sp, #(7 * 4) + stmia sp, {r8-r14}^ + nop + + /* store r0-r7 from previous mode */ + stmfd sp!, {r0-r7} + + /* setup func(data,regs) arguments */ + mov r0, r9 + mov r1, sp + mov r3, r8 + + mov r7, sp + + /* Get sp and lr from non-user modes */ + and r4, r12, #MODE_MASK + cmp r4, #USR_MODE + beq fiq_from_usr_mode + + mov r7, sp + orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) + msr cpsr_c, r4 + str sp, [r7, #(4 * 13)] + str lr, [r7, #(4 * 14)] + mrs r5, spsr + str r5, [r7, #(4 * 17)] + + cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + /* use fiq stack if we reenter this mode */ + subne sp, r7, #(4 * 3) + +fiq_from_usr_mode: + msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + mov r2, sp + sub sp, r7, #12 + stmfd sp!, {r2, ip, lr} + /* call func(data,regs) */ + blx r3 + ldmfd sp, {r2, ip, lr} + mov sp, r2 + + /* restore/discard saved state */ + cmp r4, #USR_MODE + beq fiq_from_usr_mode_exit + + msr cpsr_c, r4 + ldr sp, [r7, #(4 * 13)] + ldr lr, [r7, #(4 * 14)] + msr spsr_cxsf, r5 + +fiq_from_usr_mode_exit: + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + + ldmfd sp!, {r0-r7} + add sp, sp, #(7 * 4) + ldmfd sp!, {r11-r12, lr} +exit_fiq: + msr spsr_cxsf, r12 + add r10, #1 + movs pc, r11 + +nested_fiq: + orr r12, r12, #(PSR_F_BIT) + b exit_fiq + +fiq_glue_end: + +ENTRY(fiq_glue_setup) /* func, data, sp */ + mrs r3, cpsr + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + movs r8, r0 + mov r9, r1 + mov sp, r2 + moveq r10, #0 + movne r10, #1 + msr cpsr_c, r3 + bx lr + diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 000000000000..4044c7db95c8 --- /dev/null +++ b/arch/arm/common/fiq_glue_setup.c @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <asm/fiq.h> +#include <asm/fiq_glue.h> + +extern unsigned char fiq_glue, fiq_glue_end; +extern void fiq_glue_setup(void *func, void *data, void *sp); + +static struct fiq_handler fiq_debbuger_fiq_handler = { + .name = "fiq_glue", +}; +DEFINE_PER_CPU(void *, fiq_stack); +static struct fiq_glue_handler *current_handler; +static DEFINE_MUTEX(fiq_glue_lock); + +static void fiq_glue_setup_helper(void *info) +{ + struct fiq_glue_handler *handler = info; + fiq_glue_setup(handler->fiq, handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP); +} + +int fiq_glue_register_handler(struct fiq_glue_handler *handler) +{ + int ret; + int cpu; + + if (!handler || !handler->fiq) + return -EINVAL; + + mutex_lock(&fiq_glue_lock); + if (fiq_stack) { + ret = -EBUSY; + goto err_busy; + } + + for_each_possible_cpu(cpu) { + void *stack; + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { + ret = -ENOMEM; + goto err_alloc_fiq_stack; + } + per_cpu(fiq_stack, cpu) = stack; + } + + ret = claim_fiq(&fiq_debbuger_fiq_handler); + if (WARN_ON(ret)) + goto err_claim_fiq; + + current_handler = handler; + on_each_cpu(fiq_glue_setup_helper, handler, true); + set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); + + mutex_unlock(&fiq_glue_lock); + return 0; + +err_claim_fiq: +err_alloc_fiq_stack: + for_each_possible_cpu(cpu) { + __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); + per_cpu(fiq_stack, cpu) = NULL; + } +err_busy: + mutex_unlock(&fiq_glue_lock); + return ret; +} + +/** + * fiq_glue_resume - Restore fiqs after suspend or low power idle states + * + * This must be called before calling local_fiq_enable after returning from a + * power state where the fiq mode registers were lost. If a driver provided + * a resume hook when it registered the handler it will be called. 
+ */ + +void fiq_glue_resume(void) +{ + if (!current_handler) + return; + fiq_glue_setup(current_handler->fiq, current_handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP); + if (current_handler->resume) + current_handler->resume(current_handler); +} + diff --git a/arch/arm/configs/tegra_android_defconfig b/arch/arm/configs/tegra_android_defconfig new file mode 100644 index 000000000000..55367029c677 --- /dev/null +++ b/arch/arm/configs/tegra_android_defconfig @@ -0,0 +1,307 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_CROSS_COMPILE="arm-eabi-" +# CONFIG_SWAP is not set +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_PANIC_TIMEOUT=10 +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set +CONFIG_ASHMEM=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_TEGRA=y +CONFIG_MACH_HARMONY=y +CONFIG_MACH_VENTANA=y +CONFIG_TEGRA_DEBUG_UARTD=y +CONFIG_TEGRA_PWM=y +CONFIG_TEGRA_NVRM=y +CONFIG_TEGRA_NVOS=y +CONFIG_FIQ_DEBUGGER=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_SMP=y +CONFIG_NR_CPUS=2 +CONFIG_PREEMPT=y +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_HIGHMEM=y +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=448M@0M console=ttyS0,115200n8 earlyprintk init=/bin/ash" +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_PM=y +CONFIG_WAKELOCK=y +CONFIG_PM_RUNTIME=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y 
+CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +CONFIG_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +# CONFIG_RPS is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_RFKILL=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_MTD=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_NAND_TEGRA=y +CONFIG_MTD_NAND=y +CONFIG_BLK_DEV_LOOP=y +# CONFIG_ANDROID_PMEM is not set +CONFIG_SENSORS_AK8975=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_MULTI_LUN=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_NET_ETHERNET=y +CONFIG_SMC91X=y +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PANJIT_I2C=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_TEGRA=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_HW_RANDOM is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_TEGRA=y +CONFIG_SPI=y +CONFIG_SPI_TEGRA=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_SUPPLY=y +CONFIG_PDA_POWER=y +CONFIG_WATCHDOG=y +CONFIG_TEGRA_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_TPS6586X=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_VIDEO_DEV=y +# CONFIG_VIDEO_ALLOW_V4L1 is not set +# CONFIG_RC_MAP is not set +# CONFIG_IR_NEC_DECODER is not set +# CONFIG_IR_RC5_DECODER is not set +# CONFIG_IR_RC6_DECODER is not set +# CONFIG_IR_JVC_DECODER is not set +# CONFIG_IR_SONY_DECODER is not set +# CONFIG_MEDIA_TUNER_CUSTOMISE is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y +CONFIG_USB_VIDEO_CLASS=y +# CONFIG_USB_GSPCA is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +CONFIG_TEGRA_GRHOST=y +CONFIG_TEGRA_DC=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_DEVICEFS=y +CONFIG_USB_SUSPEND=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_TEGRA_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_USB_LIBUSUAL=y +CONFIG_USB_SERIAL=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_ANDROID=y 
+CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_MTP=y +CONFIG_USB_TEGRA_OTG=y +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_SWITCH=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_TPS6586X=y +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +# CONFIG_DNOTIFY is not set +CONFIG_FUSE_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_EFI_PARTITION=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_VM=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4656a24058d2..5078dc6c50d8 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -336,7 +336,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. */ -#define flush_cache_user_range(vma,start,end) \ +#define flush_cache_user_range(start,end) \ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) /* diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h new file mode 100644 index 000000000000..e711b57b22f1 --- /dev/null +++ b/arch/arm/include/asm/fiq_debugger.h @@ -0,0 +1,46 @@ +/* + * arch/arm/include/asm/fiq_debugger.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ +#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ + +#include <linux/serial_core.h> + +#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR +#define FIQ_DEBUGGER_BREAK 0x00ff0100 + +#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq" +#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal" +#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup" + +struct fiq_debugger_pdata { + int (*uart_init)(struct platform_device *pdev); + void (*uart_free)(struct platform_device *pdev); + int (*uart_resume)(struct platform_device *pdev); + int (*uart_getc)(struct platform_device *pdev); + void (*uart_putc)(struct platform_device *pdev, unsigned int c); + void (*uart_flush)(struct platform_device *pdev); + + void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq, + bool enable); + void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq); + + void (*force_irq)(struct platform_device *pdev, unsigned int irq); + void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq); +}; + +#endif diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h new file mode 100644 index 000000000000..d54c29db97a8 --- /dev/null +++ b/arch/arm/include/asm/fiq_glue.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_FIQ_GLUE_H +#define __ASM_FIQ_GLUE_H + +struct fiq_glue_handler { + void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp); + void (*resume)(struct fiq_glue_handler *h); +}; + +int fiq_glue_register_handler(struct fiq_glue_handler *handler); + +#ifdef CONFIG_FIQ_GLUE +void fiq_glue_resume(void); +#else +static inline void fiq_glue_resume(void) {} +#endif + +#endif diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index e6215305544a..963a338d567b 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -13,9 +13,13 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 +#define cache_ops_need_broadcast() 0 +#else static inline int cache_ops_need_broadcast(void) { return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; } +#endif #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff8d104..80f749368e4f 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -7,6 +7,8 @@ .macro set_tls_v6k, tp, tmp1, tmp2 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + mov \tmp1, #0xffff0fff + str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 .endm .macro set_tls_v6, tp, tmp1, tmp2 @@ -15,7 +17,7 @@ mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? 
mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register - streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 + str \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 .endm .macro set_tls_software, tp, tmp1, tmp2 diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index a38b4879441d..90ecfa32a226 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -22,7 +22,7 @@ #if defined(CONFIG_DEBUG_ICEDCC) @@ debug using ARM EmbeddedICE DCC channel -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) .macro addruart, rx, tmp .endm diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 69141f0b8388..3161faf3d2e8 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -245,6 +245,77 @@ void machine_restart(char *cmd) arm_pm_restart(reboot_mode, cmd); } +/* + * dump a block of kernel memory from around the given address + */ +static void show_data(unsigned long addr, int nbytes, const char *name) +{ + int i, j; + int nlines; + u32 *p; + + /* + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers + */ + if (addr < PAGE_OFFSET || addr > -256UL) + return; + + printk("\n%s: %#lx:\n", name, addr); + + /* + * round address down to a 32 bit boundary + * and always dump a multiple of 32 bytes + */ + p = (u32 *)(addr & ~(sizeof(u32) - 1)); + nbytes += (addr & (sizeof(u32) - 1)); + nlines = (nbytes + 31) / 32; + + + for (i = 0; i < nlines; i++) { + /* + * just display low 16 bits of address to keep + * each line of the dump < 80 characters + */ + printk("%04lx ", (unsigned long)p & 0xffff); + for (j = 0; j < 8; j++) { + u32 data; + if (probe_kernel_address(p, data)) { + printk(" ********"); + } else { + printk(" %08x", data); + } + ++p; + } + printk("\n"); + } +} + +static void show_extra_register_data(struct pt_regs *regs, int nbytes) +{ + mm_segment_t fs; + + fs = get_fs(); + set_fs(KERNEL_DS); + show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC"); + show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR"); + show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP"); + show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP"); + show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP"); + show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0"); + show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1"); + show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2"); + show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3"); + show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4"); + show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5"); + show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6"); + show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7"); + show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8"); + show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9"); + show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10"); + set_fs(fs); +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; @@ -304,6 +375,8 @@ void __show_regs(struct pt_regs *regs) printk("Control: %08x%s\n", ctrl, buf); } #endif + + show_extra_register_data(regs, 128); } void show_regs(struct pt_regs * regs) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bca..15c46d27ca27 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -602,6 +602,14 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, static inline void setup_syscall_restart(struct pt_regs *regs) { + if (regs->ARM_ORIG_r0 == -ERESTARTNOHAND || + regs->ARM_ORIG_r0 == -ERESTARTSYS || + regs->ARM_ORIG_r0 == -ERESTARTNOINTR || + regs->ARM_ORIG_r0 == 
-ERESTART_RESTARTBLOCK) { + /* the syscall cannot be safely restarted, return -EINTR instead */ + regs->ARM_r0 = -EINTR; + return; + } regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; } @@ -734,6 +742,7 @@ static void do_signal(struct pt_regs *regs, int syscall) */ if (syscall) { if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + regs->ARM_r0 = -EAGAIN; /* prevent multiple restarts */ if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cda78d59aa31..17c5649340ad 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -454,7 +454,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end > vma->vm_end) end = vma->vm_end; - flush_cache_user_range(vma, start, end); + up_read(&mm->mmap_sem); + flush_cache_user_range(start, end); + return; } up_read(&mm->mmap_sem); } @@ -524,7 +526,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (has_tls_reg) { asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0)); - } else { + } /*else*/ { /* * User space must never try to access this directly. * Expect your app to break eventually if you do so. diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 398af59d7e3e..0bd7b41592be 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -66,6 +66,13 @@ config TEGRA_PWM help Enable support for the Tegra PWM controller(s). +config TEGRA_FIQ_DEBUGGER + bool "Enable the FIQ serial debugger on Tegra" + default y + select FIQ_DEBUGGER + help + Enables the FIQ serial debugger on Tegra" + endif config TEGRA_IOVMM_GART diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 22e59f357a92..8872cd54337f 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -13,6 +13,7 @@ obj-y += fuse.o obj-y += tegra_i2s_audio.o obj-$(CONFIG_USB_SUPPORT) += usb_phy.o obj-$(CONFIG_FIQ) += fiq.o +obj-$(CONFIG_TEGRA_FIQ_DEBUGGER) += tegra_fiq_debugger.o obj-$(CONFIG_TEGRA_PWM) += pwm.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o diff --git a/arch/arm/mach-tegra/board-ventana.c b/arch/arm/mach-tegra/board-ventana.c index 82e3a22efa3e..60948ad9dba4 100644 --- a/arch/arm/mach-tegra/board-ventana.c +++ b/arch/arm/mach-tegra/board-ventana.c @@ -33,6 +33,7 @@ #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> +#include <linux/usb/android_composite.h> #include <mach/clk.h> #include <mach/iomap.h> @@ -49,6 +50,7 @@ #include "board-ventana.h" #include "devices.h" #include "gpio-names.h" +#include "fuse.h" static struct plat_serial8250_port debug_uart_platform_data[] = { { @@ -80,6 +82,43 @@ static __initdata struct tegra_clk_init_table ventana_clk_init_table[] = { { NULL, NULL, 0, 0}, }; +static char *usb_functions[] = { "mtp" }; +static char *usb_functions_adb[] = { "mtp", "adb" }; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x7102, + .num_functions = ARRAY_SIZE(usb_functions), + .functions = usb_functions, + }, + { + .product_id = 0x7100, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, + }, +}; + +/* standard android USB platform data */ +static struct android_usb_platform_data andusb_plat = { + .vendor_id = 0x0955, + .product_id = 0x7100, + .manufacturer_name = "NVIDIA", + .product_name = "Ventana", + .serial_number = NULL, + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = 
ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, +}; + +static struct platform_device androidusb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &andusb_plat, + }, +}; + static struct tegra_i2c_platform_data ventana_i2c1_platform_data = { .adapter_nr = 0, .bus_count = 1, @@ -165,6 +204,7 @@ static struct platform_device ventana_keys_device = { static struct platform_device *ventana_devices[] __initdata = { &tegra_otg_device, + &androidusb_device, &debug_uart, &pmu_device, &tegra_udc_device, @@ -204,12 +244,16 @@ static int __init ventana_touch_init(void) static void __init tegra_ventana_init(void) { - tegra_common_init(); + char serial[20]; + tegra_common_init(); tegra_clk_init_from_table(ventana_clk_init_table); ventana_pinmux_init(); + snprintf(serial, sizeof(serial), "%llx", tegra_chip_uid()); + andusb_plat.serial_number = kstrdup(serial, GFP_KERNEL); platform_add_devices(ventana_devices, ARRAY_SIZE(ventana_devices)); + ventana_sdhci_init(); ventana_i2c_init(); ventana_regulator_init(); diff --git a/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h b/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h new file mode 100644 index 000000000000..4d1a0b54f2ae --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h @@ -0,0 +1,30 @@ +/* + * linux/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MACH_TEGRA_FIQ_DEBUGGER_H +#define __MACH_TEGRA_FIQ_DEBUGGER_H + +#ifdef CONFIG_TEGRA_FIQ_DEBUGGER +void tegra_serial_debug_init(unsigned int base, int irq, + struct clk *clk, int signal_irq, int wakeup_irq); +#else +static inline void tegra_serial_debug_init(unsigned int base, int irq, + struct clk *clk, int signal_irq, int wakeup_irq) +{ +} +#endif + +#endif diff --git a/arch/arm/mach-tegra/tegra_fiq_debugger.c b/arch/arm/mach-tegra/tegra_fiq_debugger.c new file mode 100644 index 000000000000..29e3119b0edd --- /dev/null +++ b/arch/arm/mach-tegra/tegra_fiq_debugger.c @@ -0,0 +1,208 @@ +/* + * arch/arm/mach-tegra/fiq_debugger.c + * + * Serial Debugger Interface for Tegra + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/serial_reg.h> +#include <linux/slab.h> +#include <linux/stacktrace.h> +#include <asm/fiq_debugger.h> +#include <mach/tegra_fiq_debugger.h> +#include <mach/system.h> +#include <mach/fiq.h> + +#include <linux/uaccess.h> + +#include <mach/legacy_irq.h> + +struct tegra_fiq_debugger { + struct fiq_debugger_pdata pdata; + void __iomem *debug_port_base; + bool break_seen; +}; + +static inline void tegra_write(struct tegra_fiq_debugger *t, + unsigned int val, unsigned int off) +{ + __raw_writeb(val, t->debug_port_base + off * 4); +} + +static inline unsigned int tegra_read(struct tegra_fiq_debugger *t, + unsigned int off) +{ + return __raw_readb(t->debug_port_base + off * 4); +} + +static inline unsigned int tegra_read_lsr(struct tegra_fiq_debugger *t) +{ + unsigned int lsr; + + lsr = tegra_read(t, UART_LSR); + if (lsr & UART_LSR_BI) + t->break_seen = true; + + return lsr; +} + +static int debug_port_init(struct platform_device *pdev) +{ + struct tegra_fiq_debugger *t; + t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata); + + if (tegra_read(t, UART_LSR) & UART_LSR_DR) + (void)tegra_read(t, UART_RX); + /* enable rx and lsr interrupt */ + tegra_write(t, UART_IER_RLSI | UART_IER_RDI, UART_IER); + /* interrupt on every character */ + tegra_write(t, 0, UART_IIR); + + return 0; +} + +static int debug_getc(struct platform_device *pdev) +{ + unsigned int lsr; + struct tegra_fiq_debugger *t; + t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata); + + lsr = tegra_read_lsr(t); + + if (lsr & UART_LSR_BI || t->break_seen) { + t->break_seen = false; + return FIQ_DEBUGGER_BREAK; + } + + if (lsr & UART_LSR_DR) + return tegra_read(t, UART_RX); + + return FIQ_DEBUGGER_NO_CHAR; +} + +static void debug_putc(struct platform_device *pdev, unsigned int c) +{ + struct tegra_fiq_debugger *t; + t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata); + + while (!(tegra_read_lsr(t) & UART_LSR_THRE)) + cpu_relax(); + + tegra_write(t, c, UART_TX); +} + +static void debug_flush(struct platform_device *pdev) +{ + struct tegra_fiq_debugger *t; + t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata); + + while (!(tegra_read_lsr(t) & UART_LSR_TEMT)) + cpu_relax(); +} + +static void fiq_enable(struct platform_device *pdev, unsigned int irq, bool on) +{ + if (on) + tegra_fiq_enable(irq); + else + tegra_fiq_disable(irq); +} + +static int tegra_fiq_debugger_id; + +void tegra_serial_debug_init(unsigned int base, int irq, + struct clk *clk, int signal_irq, int wakeup_irq) +{ + struct tegra_fiq_debugger *t; + struct platform_device *pdev; + struct resource *res; + int res_count; + + t = kzalloc(sizeof(struct tegra_fiq_debugger), GFP_KERNEL); + if (!t) { + pr_err("Failed to allocate for fiq debugger\n"); + return; + } + + t->pdata.uart_init = debug_port_init; + t->pdata.uart_getc = debug_getc; + t->pdata.uart_putc = debug_putc; + t->pdata.uart_flush = debug_flush; + t->pdata.fiq_enable = fiq_enable; + + t->debug_port_base = ioremap(base, PAGE_SIZE); + if (!t->debug_port_base) { + pr_err("Failed to ioremap for fiq debugger\n"); + goto out1; + } + + res = kzalloc(sizeof(struct resource) * 3, GFP_KERNEL); + if (!res) { + pr_err("Failed to alloc fiq debugger resources\n"); + goto out2; + } + + pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); + if (!pdev) { + pr_err("Failed to 
alloc fiq debugger platform device\n"); + goto out3; + }; + + res[0].flags = IORESOURCE_IRQ; + res[0].start = irq; + res[0].end = irq; + res[0].name = "fiq"; + + res[1].flags = IORESOURCE_IRQ; + res[1].start = signal_irq; + res[1].end = signal_irq; + res[1].name = "signal"; + res_count = 2; + + if (wakeup_irq >= 0) { + res[2].flags = IORESOURCE_IRQ; + res[2].start = wakeup_irq; + res[2].end = wakeup_irq; + res[2].name = "wakeup"; + res_count++; + } + + pdev->name = "fiq_debugger"; + pdev->id = tegra_fiq_debugger_id++; + pdev->dev.platform_data = &t->pdata; + pdev->resource = res; + pdev->num_resources = res_count; + + if (platform_device_register(pdev)) { + pr_err("Failed to register fiq debugger\n"); + goto out4; + } + + return; + +out4: + kfree(pdev); +out3: + kfree(res); +out2: + iounmap(t->debug_port_base); +out1: + kfree(t); +} diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 86aa689ef1aa..ea0894f62e07 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -256,6 +256,11 @@ v6_dma_clean_range: * - end - virtual end address of region */ ENTRY(v6_dma_flush_range) +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT + sub r2, r1, r0 + cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT + bhi v6_dma_flush_dcache_all +#endif bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef CONFIG_DMA_CACHE_RWFO @@ -274,6 +279,18 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT +v6_dma_flush_dcache_all: + mov r0, #0 +#ifdef HARVARD_CACHE + mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate +#else + mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate +#endif + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mov pc, lr +#endif + /* * dma_map_area(start, size, dir) * - start - kernel virtual start address diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 9b906dec1ca1..9d77d90f4ade 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -168,10 +168,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, return; mapping = page_mapping(page); -#ifndef CONFIG_SMP if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) __flush_dcache_page(mapping, page); -#endif if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, ptep, pfn); diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index c6844cb9b508..e0758968e9f8 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -17,6 +17,7 @@ #include <asm/smp_plat.h> #include <asm/system.h> #include <asm/tlbflush.h> +#include <asm/smp_plat.h> #include "mm.h" @@ -246,12 +247,10 @@ void flush_dcache_page(struct page *page) mapping = page_mapping(page); -#ifndef CONFIG_SMP - if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) + if (!cache_ops_need_broadcast() && + !PageHighMem(page) && mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); - else -#endif - { + else { __flush_dcache_page(mapping, page); if (mapping && cache_is_vivt()) __flush_dcache_aliases(mapping, page); diff --git a/block/blk-core.c b/block/blk-core.c index 32a1c123dfb3..677f57445fd9 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1612,11 +1612,12 @@ void submit_bio(int rw, struct bio *bio) if (unlikely(block_dump)) { char b[BDEVNAME_SIZE]; - printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", + printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", current->comm, task_pid_nr(current), (rw & WRITE) ? 
"WRITE" : "READ", (unsigned long long)bio->bi_sector, - bdevname(bio->bi_bdev, b)); + bdevname(bio->bi_bdev, b), + count); } } diff --git a/block/genhd.c b/block/genhd.c index 59a2db6fecef..aa628253bee1 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1006,6 +1006,22 @@ static void disk_release(struct device *dev) free_part_stats(&disk->part0); kfree(disk); } + +static int disk_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct gendisk *disk = dev_to_disk(dev); + struct disk_part_iter piter; + struct hd_struct *part; + int cnt = 0; + + disk_part_iter_init(&piter, disk, 0); + while((part = disk_part_iter_next(&piter))) + cnt++; + disk_part_iter_exit(&piter); + add_uevent_var(env, "NPARTS=%u", cnt); + return 0; +} + struct class block_class = { .name = "block", }; @@ -1024,6 +1040,7 @@ static struct device_type disk_type = { .groups = disk_attr_groups, .release = disk_release, .devnode = block_devnode, + .uevent = disk_uevent, }; #ifdef CONFIG_PROC_FS diff --git a/drivers/Kconfig b/drivers/Kconfig index a2b902f4d437..6781b709cac5 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -88,6 +88,8 @@ source "drivers/memstick/Kconfig" source "drivers/leds/Kconfig" +source "drivers/switch/Kconfig" + source "drivers/accessibility/Kconfig" source "drivers/infiniband/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index a2aea53a75ed..d7f34f1a6cab 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -95,6 +95,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ +obj-$(CONFIG_SWITCH) += switch/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 276d5a701dc3..17aff79bd329 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -26,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/async.h> +#include <linux/timer.h> #include "../base.h" #include "power.h" @@ -45,6 +46,9 @@ LIST_HEAD(dpm_list); static DEFINE_MUTEX(dpm_list_mtx); static pm_message_t pm_transition; +static void dpm_drv_timeout(unsigned long data); +static DEFINE_TIMER(dpm_drv_wd, dpm_drv_timeout, 0, 0); + /* * Set once the preparation of devices for a PM transition has started, reset * before starting to resume devices. Protected by dpm_list_mtx. @@ -524,7 +528,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_DEVICE(dev); TRACE_RESUME(0); - dpm_wait(dev->parent, async); + if (dev->parent && dev->parent->power.status >= DPM_OFF) + dpm_wait(dev->parent, async); device_lock(dev); dev->power.status = DPM_RESUMING; @@ -585,6 +590,45 @@ static bool is_async(struct device *dev) } /** + * dpm_drv_timeout - Driver suspend / resume watchdog handler + * @data: struct device which timed out + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so + * BUG() out for a crash-dump + * + */ +static void dpm_drv_timeout(unsigned long data) +{ + struct device *dev = (struct device *) data; + + printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev), + (dev->driver ? dev->driver->name : "no driver")); + BUG(); +} + +/** + * dpm_drv_wdset - Sets up driver suspend/resume watchdog timer. + * @dev: struct device which we're guarding. 
+ * + */ +static void dpm_drv_wdset(struct device *dev) +{ + dpm_drv_wd.data = (unsigned long) dev; + mod_timer(&dpm_drv_wd, jiffies + (HZ * 3)); +} + +/** + * dpm_drv_wdclr - clears driver suspend/resume watchdog timer. + * @dev: struct device which we're no longer guarding. + * + */ +static void dpm_drv_wdclr(struct device *dev) +{ + del_timer_sync(&dpm_drv_wd); +} + +/** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. * @@ -935,7 +979,9 @@ static int dpm_suspend(pm_message_t state) get_device(dev); mutex_unlock(&dpm_list_mtx); + dpm_drv_wdset(dev); error = device_suspend(dev); + dpm_drv_wdclr(dev); mutex_lock(&dpm_list_mtx); if (error) { diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3d44ec724c17..0a0880332f20 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -88,6 +88,19 @@ config VT_HW_CONSOLE_BINDING information. For framebuffer console users, please refer to <file:Documentation/fb/fbcon.txt>. +config DEVMEM + bool "Memory device driver" + default y + help + The memory driver provides two character devices, mem and kmem, which + provide access to the system's memory. The mem device is a view of + physical memory, and each byte in the device corresponds to the + matching physical address. The kmem device is the same as mem, but + the addresses correspond to the kernel's virtual address space rather + than physical memory. These devices are standard parts of a Linux + system and most users should say Y here. You might say N if very + security conscience or memory is tight. + config DEVKMEM bool "/dev/kmem virtual device support" default y @@ -1119,6 +1132,10 @@ config DEVPORT depends on ISA || PCI default y +config DCC_TTY + tristate "DCC tty driver" + depends on ARM + source "drivers/s390/char/Kconfig" config RAMOOPS diff --git a/drivers/char/Makefile b/drivers/char/Makefile index dc9641660605..720d6e8142cd 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -109,6 +109,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ +obj-$(CONFIG_DCC_TTY) += dcc_tty.o obj-$(CONFIG_PS3_FLASH) += ps3flash.o obj-$(CONFIG_RAMOOPS) += ramoops.o diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c new file mode 100644 index 000000000000..a787accdcb14 --- /dev/null +++ b/drivers/char/dcc_tty.c @@ -0,0 +1,326 @@ +/* drivers/char/dcc_tty.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/console.h> +#include <linux/hrtimer.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> + +MODULE_DESCRIPTION("DCC TTY Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); + +static spinlock_t g_dcc_tty_lock = SPIN_LOCK_UNLOCKED; +static struct hrtimer g_dcc_timer; +static char g_dcc_buffer[16]; +static int g_dcc_buffer_head; +static int g_dcc_buffer_count; +static unsigned g_dcc_write_delay_usecs = 1; +static struct tty_driver *g_dcc_tty_driver; +static struct tty_struct *g_dcc_tty; +static int g_dcc_tty_open_count; + +static void dcc_poll_locked(void) +{ + char ch; + int rch; + int written; + + while (g_dcc_buffer_count) { + ch = g_dcc_buffer[g_dcc_buffer_head]; + asm( + "mrc 14, 0, r15, c0, c1, 0\n" + "mcrcc 14, 0, %1, c0, c5, 0\n" + "movcc %0, #1\n" + "movcs %0, #0\n" + : "=r" (written) + : "r" (ch) + ); + if (written) { + if (ch == '\n') + g_dcc_buffer[g_dcc_buffer_head] = '\r'; + else { + g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer); + g_dcc_buffer_count--; + if (g_dcc_tty) + tty_wakeup(g_dcc_tty); + } + g_dcc_write_delay_usecs = 1; + } else { + if (g_dcc_write_delay_usecs > 0x100) + break; + g_dcc_write_delay_usecs <<= 1; + udelay(g_dcc_write_delay_usecs); + } + } + + if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) { + asm( + "mrc 14, 0, %0, c0, c1, 0\n" + "tst %0, #(1 << 30)\n" + "moveq %0, #-1\n" + "mrcne 14, 0, %0, c0, c5, 0\n" + : "=r" (rch) + ); + if (rch >= 0) { + ch = rch; + tty_insert_flip_string(g_dcc_tty, &ch, 1); + tty_flip_buffer_push(g_dcc_tty); + } + } + + + if (g_dcc_buffer_count) + hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL); + else + hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL); +} + +static int dcc_tty_open(struct tty_struct * tty, struct file * filp) +{ + int ret; + unsigned long irq_flags; + + spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); + if (g_dcc_tty == NULL || g_dcc_tty == tty) { + g_dcc_tty = tty; + g_dcc_tty_open_count++; + ret = 0; + } else + ret = -EBUSY; + spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); + + printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret); + + return ret; +} + +static void dcc_tty_close(struct tty_struct * tty, struct file * filp) +{ + printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags); + if (g_dcc_tty == tty) { + if (--g_dcc_tty_open_count == 0) + g_dcc_tty = NULL; + } +} + +static int dcc_write(const unsigned char *buf_start, int count) +{ + const unsigned char *buf = buf_start; + unsigned long irq_flags; + int copy_len; + int space_left; + int tail; + + if (count < 1) + return 0; + + spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); + do { + tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer); + copy_len = ARRAY_SIZE(g_dcc_buffer) - tail; + space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; + if (copy_len > space_left) + copy_len = space_left; + if (copy_len > count) + copy_len = count; + memcpy(&g_dcc_buffer[tail], buf, copy_len); + g_dcc_buffer_count += copy_len; + buf += copy_len; + count -= copy_len; + if (copy_len < count && copy_len < space_left) { + space_left -= copy_len; + copy_len = count; + if (copy_len > space_left) { + copy_len = space_left; + } + memcpy(g_dcc_buffer, buf, copy_len); + buf += copy_len; + count -= copy_len; + g_dcc_buffer_count += copy_len; 
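+			/* the second memcpy() above wrapped the remainder back to the start of g_dcc_buffer */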
+ } + dcc_poll_locked(); + space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; + } while(count && space_left); + spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); + return buf - buf_start; +} + +static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count) +{ + int ret; + /* printk("dcc_tty_write %p, %d\n", buf, count); */ + ret = dcc_write(buf, count); + if (ret != count) + printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret); + return ret; +} + +static int dcc_tty_write_room(struct tty_struct *tty) +{ + int space_left; + unsigned long irq_flags; + + spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); + space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; + spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); + return space_left; +} + +static int dcc_tty_chars_in_buffer(struct tty_struct *tty) +{ + int ret; + asm( + "mrc 14, 0, %0, c0, c1, 0\n" + "mov %0, %0, LSR #30\n" + "and %0, %0, #1\n" + : "=r" (ret) + ); + return ret; +} + +static void dcc_tty_unthrottle(struct tty_struct * tty) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); + dcc_poll_locked(); + spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); +} + +static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); + dcc_poll_locked(); + spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); + return HRTIMER_NORESTART; +} + +void dcc_console_write(struct console *co, const char *b, unsigned count) +{ +#if 1 + dcc_write(b, count); +#else + /* blocking printk */ + while (count > 0) { + int written; + written = dcc_write(b, count); + if (written) { + b += written; + count -= written; + } + } +#endif +} + +static struct tty_driver *dcc_console_device(struct console *c, int *index) +{ + *index = 0; + return g_dcc_tty_driver; +} + +static int __init dcc_console_setup(struct console *co, char *options) +{ + if (co->index != 0) + return -ENODEV; + return 0; +} + + +static struct console dcc_console = +{ + .name = "ttyDCC", + .write = dcc_console_write, + .device = dcc_console_device, + .setup = dcc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static struct tty_operations dcc_tty_ops = { + .open = dcc_tty_open, + .close = dcc_tty_close, + .write = dcc_tty_write, + .write_room = dcc_tty_write_room, + .chars_in_buffer = dcc_tty_chars_in_buffer, + .unthrottle = dcc_tty_unthrottle, +}; + +static int __init dcc_tty_init(void) +{ + int ret; + + hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + g_dcc_timer.function = dcc_tty_timer_func; + + g_dcc_tty_driver = alloc_tty_driver(1); + if (!g_dcc_tty_driver) { + printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n"); + ret = -ENOMEM; + goto err_alloc_tty_driver_failed; + } + g_dcc_tty_driver->owner = THIS_MODULE; + g_dcc_tty_driver->driver_name = "dcc"; + g_dcc_tty_driver->name = "ttyDCC"; + g_dcc_tty_driver->major = 0; // auto assign + g_dcc_tty_driver->minor_start = 0; + g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL; + g_dcc_tty_driver->init_termios = tty_std_termios; + g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops); + ret = tty_register_driver(g_dcc_tty_driver); + if (ret) { + printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret); + goto err_tty_register_driver_failed; + } + tty_register_device(g_dcc_tty_driver, 0, NULL); + + 
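+	/* mirror printk output onto the DCC channel through the console layer */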
register_console(&dcc_console); + hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL); + + return 0; + +err_tty_register_driver_failed: + put_tty_driver(g_dcc_tty_driver); + g_dcc_tty_driver = NULL; +err_alloc_tty_driver_failed: + return ret; +} + +static void __exit dcc_tty_exit(void) +{ + int ret; + + tty_unregister_device(g_dcc_tty_driver, 0); + ret = tty_unregister_driver(g_dcc_tty_driver); + if (ret < 0) { + printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret); + } else { + put_tty_driver(g_dcc_tty_driver); + } + g_dcc_tty_driver = NULL; +} + +module_init(dcc_tty_init); +module_exit(dcc_tty_exit); + + diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 1f528fad3516..91091b0f1a32 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -59,6 +59,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) } #endif +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) #ifdef CONFIG_STRICT_DEVMEM static inline int range_is_allowed(unsigned long pfn, unsigned long size) { @@ -84,7 +85,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) return 1; } #endif +#endif +#ifdef CONFIG_DEVMEM void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) { } @@ -211,6 +214,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf, *ppos += written; return written; } +#endif /* CONFIG_DEVMEM */ + +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) int __weak phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) @@ -332,6 +338,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) } return 0; } +#endif /* CONFIG_DEVMEM */ #ifdef CONFIG_DEVKMEM static int mmap_kmem(struct file *file, struct vm_area_struct *vma) @@ -696,6 +703,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig) return file->f_pos = 0; } +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT) + /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, @@ -729,10 +738,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) return ret; } +#endif + +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT) static int open_port(struct inode * inode, struct file * filp) { return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; } +#endif #define zero_lseek null_lseek #define full_lseek null_lseek @@ -742,6 +755,7 @@ static int open_port(struct inode * inode, struct file * filp) #define open_kmem open_mem #define open_oldmem open_mem +#ifdef CONFIG_DEVMEM static const struct file_operations mem_fops = { .llseek = memory_lseek, .read = read_mem, @@ -750,6 +764,7 @@ static const struct file_operations mem_fops = { .open = open_mem, .get_unmapped_area = get_unmapped_area_mem, }; +#endif #ifdef CONFIG_DEVKMEM static const struct file_operations kmem_fops = { @@ -839,7 +854,9 @@ static const struct memdev { const struct file_operations *fops; struct backing_dev_info *dev_info; } devlist[] = { +#ifdef CONFIG_DEVMEM [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, +#endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, #endif diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index 07c2cd43109c..dd62135e7045 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -161,6 +161,15 @@ config INPUT_APMPOWER To compile this driver as a module, choose M here: the module will be called apm-power. +config INPUT_KEYRESET + tristate "Reset key" + depends on INPUT + ---help--- + Say Y here if you want to reboot when some keys are pressed; + + To compile this driver as a module, choose M here: the + module will be called keyreset. + config XEN_KBDDEV_FRONTEND tristate "Xen virtual keyboard and mouse support" depends on XEN_FBDEV_FRONTEND diff --git a/drivers/input/Makefile b/drivers/input/Makefile index 7ad212d31f99..3c7125758b7f 100644 --- a/drivers/input/Makefile +++ b/drivers/input/Makefile @@ -24,5 +24,6 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/ obj-$(CONFIG_INPUT_MISC) += misc/ obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o +obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index c908c5f83645..4d3e28103233 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -21,6 +21,7 @@ #include <linux/input.h> #include <linux/major.h> #include <linux/device.h> +#include <linux/wakelock.h> #include "input-compat.h" struct evdev { @@ -40,6 +41,8 @@ struct evdev_client { int head; int tail; spinlock_t buffer_lock; /* protects access to buffer, head and tail */ + struct wake_lock wake_lock; + char name[28]; struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; @@ -59,6 +62,7 @@ static void evdev_pass_event(struct evdev_client *client, * "empty" by having client->head == client->tail. 
*/ spin_lock(&client->buffer_lock); + wake_lock_timeout(&client->wake_lock, 5 * HZ); do { client->buffer[client->head++] = *event; client->head &= client->bufsize - 1; @@ -78,8 +82,11 @@ static void evdev_event(struct input_handle *handle, struct evdev *evdev = handle->private; struct evdev_client *client; struct input_event event; + struct timespec ts; - do_gettimeofday(&event.time); + ktime_get_ts(&ts); + event.time.tv_sec = ts.tv_sec; + event.time.tv_usec = ts.tv_nsec / NSEC_PER_USEC; event.type = type; event.code = code; event.value = value; @@ -240,6 +247,7 @@ static int evdev_release(struct inode *inode, struct file *file) mutex_unlock(&evdev->mutex); evdev_detach_client(evdev, client); + wake_lock_destroy(&client->wake_lock); kfree(client); evdev_close_device(evdev); @@ -291,6 +299,9 @@ static int evdev_open(struct inode *inode, struct file *file) client->bufsize = bufsize; spin_lock_init(&client->buffer_lock); + snprintf(client->name, sizeof(client->name), "%s-%d", + dev_name(&evdev->dev), task_tgid_vnr(current)); + wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name); client->evdev = evdev; evdev_attach_client(evdev, client); @@ -356,6 +367,8 @@ static int evdev_fetch_next_event(struct evdev_client *client, if (have_event) { *event = client->buffer[client->tail++]; client->tail &= client->bufsize - 1; + if (client->head == client->tail) + wake_unlock(&client->wake_lock); } spin_unlock_irq(&client->buffer_lock); diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c new file mode 100644 index 000000000000..b51350602fc3 --- /dev/null +++ b/drivers/input/keyreset.c @@ -0,0 +1,230 @@ +/* drivers/input/keyreset.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/input.h> +#include <linux/keyreset.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/syscalls.h> + + +struct keyreset_state { + struct input_handler input_handler; + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long upbit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; + spinlock_t lock; + int key_down_target; + int key_down; + int key_up; + int restart_disabled; +}; + +int restart_requested; +static void deferred_restart(struct work_struct *dummy) +{ + restart_requested = 2; + sys_sync(); + restart_requested = 3; + kernel_restart(NULL); +} +static DECLARE_WORK(restart_work, deferred_restart); + +static void keyreset_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + unsigned long flags; + struct keyreset_state *state = handle->private; + + if (type != EV_KEY) + return; + + if (code >= KEY_MAX) + return; + + if (!test_bit(code, state->keybit)) + return; + + spin_lock_irqsave(&state->lock, flags); + if (!test_bit(code, state->key) == !value) + goto done; + __change_bit(code, state->key); + if (test_bit(code, state->upbit)) { + if (value) { + state->restart_disabled = 1; + state->key_up++; + } else + state->key_up--; + } else { + if (value) + state->key_down++; + else + state->key_down--; + } + if (state->key_down == 0 && state->key_up == 0) + state->restart_disabled = 0; + + pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value, + state->key_down, state->key_up, state->restart_disabled); + + if (value && !state->restart_disabled && + state->key_down == state->key_down_target) { + state->restart_disabled = 1; + if (restart_requested) + panic("keyboard reset failed, %d", restart_requested); + pr_info("keyboard reset\n"); + schedule_work(&restart_work); + restart_requested = 1; + } +done: + spin_unlock_irqrestore(&state->lock, flags); +} + +static int keyreset_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i; + int ret; + struct input_handle *handle; + struct keyreset_state *state = + container_of(handler, struct keyreset_state, input_handler); + + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, state->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "keyreset"; + handle->private = state; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + pr_info("using input dev %s for key reset\n", dev->name); + + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keyreset_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id keyreset_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { }, +}; +MODULE_DEVICE_TABLE(input, keyreset_ids); + +static int keyreset_probe(struct platform_device *pdev) +{ + int ret; + int key, *keyp; + struct keyreset_state *state; + struct keyreset_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) + return -EINVAL; + + state = 
kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + spin_lock_init(&state->lock); + keyp = pdata->keys_down; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + state->key_down_target++; + __set_bit(key, state->keybit); + } + if (pdata->keys_up) { + keyp = pdata->keys_up; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + __set_bit(key, state->keybit); + __set_bit(key, state->upbit); + } + } + state->input_handler.event = keyreset_event; + state->input_handler.connect = keyreset_connect; + state->input_handler.disconnect = keyreset_disconnect; + state->input_handler.name = KEYRESET_NAME; + state->input_handler.id_table = keyreset_ids; + ret = input_register_handler(&state->input_handler); + if (ret) { + kfree(state); + return ret; + } + platform_set_drvdata(pdev, state); + return 0; +} + +int keyreset_remove(struct platform_device *pdev) +{ + struct keyreset_state *state = platform_get_drvdata(pdev); + input_unregister_handler(&state->input_handler); + kfree(state); + return 0; +} + + +struct platform_driver keyreset_driver = { + .driver.name = KEYRESET_NAME, + .probe = keyreset_probe, + .remove = keyreset_remove, +}; + +static int __init keyreset_init(void) +{ + return platform_driver_register(&keyreset_driver); +} + +static void __exit keyreset_exit(void) +{ + return platform_driver_unregister(&keyreset_driver); +} + +module_init(keyreset_init); +module_exit(keyreset_exit); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index b49e23379723..263471a905f7 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -183,6 +183,17 @@ config INPUT_ATI_REMOTE2 To compile this driver as a module, choose M here: the module will be called ati_remote2. +config INPUT_KEYCHORD + tristate "Key chord input driver support" + help + Say Y here if you want to enable the key chord driver + accessible at /dev/keychord. This driver can be used + for receiving notifications when client specified key + combinations are pressed. + + To compile this driver as a module, choose M here: the + module will be called keychord. + config INPUT_KEYSPAN_REMOTE tristate "Keyspan DMR USB remote control (EXPERIMENTAL)" depends on EXPERIMENTAL @@ -302,6 +313,11 @@ config INPUT_WINBOND_CIR To compile this driver as a module, choose M here: the module will be called winbond_cir. +config INPUT_GPIO + tristate "GPIO driver support" + help + Say Y here if you want to support gpio based keys, wheels etc... 
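+	  This builds the gpio_event framework together with its matrix keypad,
+	  direct input, output and rotary axis handlers.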
+ config HP_SDC_RTC tristate "HP SDC Real Time Clock" depends on (GSC || HP300) && SERIO diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 19ccca78fa76..7ac4ca759999 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -19,8 +19,10 @@ obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o obj-$(CONFIG_INPUT_CM109) += cm109.o obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o +obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o +obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c new file mode 100644 index 000000000000..0acf4a576f53 --- /dev/null +++ b/drivers/input/misc/gpio_axis.c @@ -0,0 +1,192 @@ +/* drivers/input/misc/gpio_axis.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/gpio.h> +#include <linux/gpio_event.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +struct gpio_axis_state { + struct gpio_event_input_devs *input_devs; + struct gpio_event_axis_info *info; + uint32_t pos; +}; + +uint16_t gpio_axis_4bit_gray_map_table[] = { + [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */ + [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */ + [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */ + [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */ + [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */ + [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */ + [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */ + [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */ +}; +uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_4bit_gray_map_table[in]; +} + +uint16_t gpio_axis_5bit_singletrack_map_table[] = { + [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */ + [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */ + [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */ + [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */ + [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */ + [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */ + [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */ + [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */ + [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */ + [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */ +}; +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_5bit_singletrack_map_table[in]; +} + +static void gpio_event_update_axis(struct gpio_axis_state *as, int report) +{ + struct gpio_event_axis_info *ai = as->info; + int i; + int change; + uint16_t state = 0; + uint16_t pos; + uint16_t old_pos = as->pos; + for (i 
= ai->count - 1; i >= 0; i--) + state = (state << 1) | gpio_get_value(ai->gpio[i]); + pos = ai->map(ai, state); + if (ai->flags & GPIOEAF_PRINT_RAW) + pr_info("axis %d-%d raw %x, pos %d -> %d\n", + ai->type, ai->code, state, old_pos, pos); + if (report && pos != old_pos) { + if (ai->type == EV_REL) { + change = (ai->decoded_size + pos - old_pos) % + ai->decoded_size; + if (change > ai->decoded_size / 2) + change -= ai->decoded_size; + if (change == ai->decoded_size / 2) { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d unknown direction, " + "pos %d -> %d\n", ai->type, + ai->code, old_pos, pos); + change = 0; /* no closest direction */ + } + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d change %d\n", + ai->type, ai->code, change); + input_report_rel(as->input_devs->dev[ai->dev], + ai->code, change); + } else { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d now %d\n", + ai->type, ai->code, pos); + input_event(as->input_devs->dev[ai->dev], + ai->type, ai->code, pos); + } + input_sync(as->input_devs->dev[ai->dev]); + } + as->pos = pos; +} + +static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id) +{ + struct gpio_axis_state *as = dev_id; + gpio_event_update_axis(as, 1); + return IRQ_HANDLED; +} + +int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + int irq; + struct gpio_event_axis_info *ai; + struct gpio_axis_state *as; + + ai = container_of(info, struct gpio_event_axis_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND) { + for (i = 0; i < ai->count; i++) + disable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + for (i = 0; i < ai->count; i++) + enable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + *data = as = kmalloc(sizeof(*as), GFP_KERNEL); + if (as == NULL) { + ret = -ENOMEM; + goto err_alloc_axis_state_failed; + } + as->input_devs = input_devs; + as->info = ai; + if (ai->dev >= input_devs->count) { + pr_err("gpio_event_axis: bad device index %d >= %d " + "for %d:%d\n", ai->dev, input_devs->count, + ai->type, ai->code); + ret = -EINVAL; + goto err_bad_device_index; + } + + input_set_capability(input_devs->dev[ai->dev], + ai->type, ai->code); + if (ai->type == EV_ABS) { + input_set_abs_params(input_devs->dev[ai->dev], ai->code, + 0, ai->decoded_size - 1, 0, 0); + } + for (i = 0; i < ai->count; i++) { + ret = gpio_request(ai->gpio[i], "gpio_event_axis"); + if (ret < 0) + goto err_request_gpio_failed; + ret = gpio_direction_input(ai->gpio[i]); + if (ret < 0) + goto err_gpio_direction_input_failed; + ret = irq = gpio_to_irq(ai->gpio[i]); + if (ret < 0) + goto err_get_irq_num_failed; + ret = request_irq(irq, gpio_axis_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "gpio_event_axis", as); + if (ret < 0) + goto err_request_irq_failed; + } + gpio_event_update_axis(as, 0); + return 0; + } + + ret = 0; + as = *data; + for (i = ai->count - 1; i >= 0; i--) { + free_irq(gpio_to_irq(ai->gpio[i]), as); +err_request_irq_failed: +err_get_irq_num_failed: +err_gpio_direction_input_failed: + gpio_free(ai->gpio[i]); +err_request_gpio_failed: + ; + } +err_bad_device_index: + kfree(as); + *data = NULL; +err_alloc_axis_state_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c new file mode 100644 index 000000000000..a98be67d1ab0 --- /dev/null +++ b/drivers/input/misc/gpio_event.c @@ -0,0 +1,260 @@ +/* 
drivers/input/misc/gpio_event.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/earlysuspend.h> +#include <linux/module.h> +#include <linux/input.h> +#include <linux/gpio_event.h> +#include <linux/hrtimer.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +struct gpio_event { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_platform_data *info; + struct early_suspend early_suspend; + void *state[0]; +}; + +static int gpio_input_event( + struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + int i; + int devnr; + int ret = 0; + int tmp_ret; + struct gpio_event_info **ii; + struct gpio_event *ip = input_get_drvdata(dev); + + for (devnr = 0; devnr < ip->input_devs->count; devnr++) + if (ip->input_devs->dev[devnr] == dev) + break; + if (devnr == ip->input_devs->count) { + pr_err("gpio_input_event: unknown device %p\n", dev); + return -EIO; + } + + for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) { + if ((*ii)->event) { + tmp_ret = (*ii)->event(ip->input_devs, *ii, + &ip->state[i], + devnr, type, code, value); + if (tmp_ret) + ret = tmp_ret; + } + } + return ret; +} + +static int gpio_event_call_all_func(struct gpio_event *ip, int func) +{ + int i; + int ret; + struct gpio_event_info **ii; + + if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) { + ii = ip->info->info; + for (i = 0; i < ip->info->info_count; i++, ii++) { + if ((*ii)->func == NULL) { + ret = -ENODEV; + pr_err("gpio_event_probe: Incomplete pdata, " + "no function\n"); + goto err_no_func; + } + if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend) + continue; + ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i], + func); + if (ret) { + pr_err("gpio_event_probe: function failed\n"); + goto err_func_failed; + } + } + return 0; + } + + ret = 0; + i = ip->info->info_count; + ii = ip->info->info + i; + while (i > 0) { + i--; + ii--; + if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend) + continue; + (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1); +err_func_failed: +err_no_func: + ; + } + return ret; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +void gpio_event_suspend(struct early_suspend *h) +{ + struct gpio_event *ip; + ip = container_of(h, struct gpio_event, early_suspend); + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND); + ip->info->power(ip->info, 0); +} + +void gpio_event_resume(struct early_suspend *h) +{ + struct gpio_event *ip; + ip = container_of(h, struct gpio_event, early_suspend); + ip->info->power(ip->info, 1); + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME); +} +#endif + +static int gpio_event_probe(struct platform_device *pdev) +{ + int err; + struct gpio_event *ip; + struct gpio_event_platform_data *event_info; + int dev_count = 1; + int i; + int registered = 0; + + event_info = pdev->dev.platform_data; + if (event_info == NULL) { + pr_err("gpio_event_probe: No pdata\n"); + return -ENODEV; + } + if ((!event_info->name && !event_info->names[0]) || + !event_info->info || !event_info->info_count) { + 
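+		/*
+		 * Complete pdata supplies at least one device name and a
+		 * non-empty array of gpio_event_info pointers, each with its
+		 * func hook set (e.g. gpio_event_input_func). Illustrative
+		 * shape only, hypothetical names; the structure definitions
+		 * live in include/linux/gpio_event.h, outside this hunk:
+		 *
+		 *	static struct gpio_event_info *evb_info[] = { &evb_input_info.info };
+		 *	static struct gpio_event_platform_data evb_pdata = {
+		 *		.name = "evb-keys",
+		 *		.info = evb_info,
+		 *		.info_count = ARRAY_SIZE(evb_info),
+		 *	};
+		 */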
pr_err("gpio_event_probe: Incomplete pdata\n"); + return -ENODEV; + } + if (!event_info->name) + while (event_info->names[dev_count]) + dev_count++; + ip = kzalloc(sizeof(*ip) + + sizeof(ip->state[0]) * event_info->info_count + + sizeof(*ip->input_devs) + + sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL); + if (ip == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + ip->input_devs = (void*)&ip->state[event_info->info_count]; + platform_set_drvdata(pdev, ip); + + for (i = 0; i < dev_count; i++) { + struct input_dev *input_dev = input_allocate_device(); + if (input_dev == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: " + "Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + input_set_drvdata(input_dev, ip); + input_dev->name = event_info->name ? + event_info->name : event_info->names[i]; + input_dev->event = gpio_input_event; + ip->input_devs->dev[i] = input_dev; + } + ip->input_devs->count = dev_count; + ip->info = event_info; + if (event_info->power) { +#ifdef CONFIG_HAS_EARLYSUSPEND + ip->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + ip->early_suspend.suspend = gpio_event_suspend; + ip->early_suspend.resume = gpio_event_resume; + register_early_suspend(&ip->early_suspend); +#endif + ip->info->power(ip->info, 1); + } + + err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT); + if (err) + goto err_call_all_func_failed; + + for (i = 0; i < dev_count; i++) { + err = input_register_device(ip->input_devs->dev[i]); + if (err) { + pr_err("gpio_event_probe: Unable to register %s " + "input device\n", ip->input_devs->dev[i]->name); + goto err_input_register_device_failed; + } + registered++; + } + + return 0; + +err_input_register_device_failed: + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); +err_call_all_func_failed: + if (event_info->power) { +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&ip->early_suspend); +#endif + ip->info->power(ip->info, 0); + } + for (i = 0; i < registered; i++) + input_unregister_device(ip->input_devs->dev[i]); + for (i = dev_count - 1; i >= registered; i--) { + input_free_device(ip->input_devs->dev[i]); +err_input_dev_alloc_failed: + ; + } + kfree(ip); +err_kp_alloc_failed: + return err; +} + +static int gpio_event_remove(struct platform_device *pdev) +{ + struct gpio_event *ip = platform_get_drvdata(pdev); + int i; + + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); + if (ip->info->power) { +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&ip->early_suspend); +#endif + ip->info->power(ip->info, 0); + } + for (i = 0; i < ip->input_devs->count; i++) + input_unregister_device(ip->input_devs->dev[i]); + kfree(ip); + return 0; +} + +static struct platform_driver gpio_event_driver = { + .probe = gpio_event_probe, + .remove = gpio_event_remove, + .driver = { + .name = GPIO_EVENT_DEV_NAME, + }, +}; + +static int __devinit gpio_event_init(void) +{ + return platform_driver_register(&gpio_event_driver); +} + +static void __exit gpio_event_exit(void) +{ + platform_driver_unregister(&gpio_event_driver); +} + +module_init(gpio_event_init); +module_exit(gpio_event_exit); + +MODULE_DESCRIPTION("GPIO Event Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c new file mode 100644 index 000000000000..758df480600b --- /dev/null +++ b/drivers/input/misc/gpio_input.c @@ -0,0 +1,353 @@ +/* drivers/input/misc/gpio_input.c + * + * Copyright (C) 2007 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/gpio.h> +#include <linux/gpio_event.h> +#include <linux/hrtimer.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/wakelock.h> + +enum { + DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */ + DEBOUNCE_PRESSED = BIT(1), + DEBOUNCE_NOTPRESSED = BIT(2), + DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */ + DEBOUNCE_POLL = BIT(4), /* Stable polling state */ + + DEBOUNCE_UNKNOWN = + DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED, +}; + +struct gpio_key_state { + struct gpio_input_state *ds; + uint8_t debounce; +}; + +struct gpio_input_state { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_input_info *info; + struct hrtimer timer; + int use_irq; + int debounce_count; + spinlock_t irq_lock; + struct wake_lock wake_lock; + struct gpio_key_state key_state[0]; +}; + +static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) +{ + int i; + int pressed; + struct gpio_input_state *ds = + container_of(timer, struct gpio_input_state, timer); + unsigned gpio_flags = ds->info->flags; + unsigned npolarity; + int nkeys = ds->info->keymap_size; + const struct gpio_event_direct_entry *key_entry; + struct gpio_key_state *key_state; + unsigned long irqflags; + uint8_t debounce; + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); +#endif + key_entry = ds->info->keymap; + key_state = ds->key_state; + spin_lock_irqsave(&ds->irq_lock, irqflags); + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + debounce = key_state->debounce; + if (debounce & DEBOUNCE_WAIT_IRQ) + continue; + if (key_state->debounce & DEBOUNCE_UNSTABLE) { + debounce = key_state->debounce = DEBOUNCE_UNKNOWN; + enable_irq(gpio_to_irq(key_entry->gpio)); + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) continue debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH); + pressed = gpio_get_value(key_entry->gpio) ^ npolarity; + if (debounce & DEBOUNCE_POLL) { + if (pressed == !(debounce & DEBOUNCE_PRESSED)) { + ds->debounce_count++; + key_state->debounce = DEBOUNCE_UNKNOWN; + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-" + "%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + continue; + } + if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 1\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_PRESSED; + continue; + } + if (!pressed && (debounce & DEBOUNCE_PRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 0\n", + ds->info->type, key_entry->code, + i, 
key_entry->gpio); + key_state->debounce = DEBOUNCE_NOTPRESSED; + continue; + } + /* key is stable */ + ds->debounce_count--; + if (ds->use_irq) + key_state->debounce |= DEBOUNCE_WAIT_IRQ; + else + key_state->debounce |= DEBOUNCE_POLL; + if (gpio_flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) " + "changed to %d\n", ds->info->type, + key_entry->code, i, key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + } + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); + } +#endif + + if (ds->debounce_count) + hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL); + else if (!ds->use_irq) + hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL); + else + wake_unlock(&ds->wake_lock); + + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) +{ + struct gpio_key_state *ks = dev_id; + struct gpio_input_state *ds = ks->ds; + int keymap_index = ks - ds->key_state; + const struct gpio_event_direct_entry *key_entry; + unsigned long irqflags; + int pressed; + + if (!ds->use_irq) + return IRQ_HANDLED; + + key_entry = &ds->info->keymap[keymap_index]; + + if (ds->info->debounce_time.tv64) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ks->debounce & DEBOUNCE_WAIT_IRQ) { + ks->debounce = DEBOUNCE_UNKNOWN; + if (ds->debounce_count++ == 0) { + wake_lock(&ds->wake_lock); + hrtimer_start( + &ds->timer, ds->info->debounce_time, + HRTIMER_MODE_REL); + } + if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_event_input_irq_handler: " + "key %x-%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + keymap_index, key_entry->gpio); + } else { + disable_irq_nosync(irq); + ks->debounce = DEBOUNCE_UNSTABLE; + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + } else { + pressed = gpio_get_value(key_entry->gpio) ^ + !(ds->info->flags & GPIOEDF_ACTIVE_HIGH); + if (ds->info->flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_event_input_irq_handler: key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + } + return IRQ_HANDLED; +} + +static int gpio_event_input_request_irqs(struct gpio_input_state *ds) +{ + int i; + int err; + unsigned int irq; + unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + + for (i = 0; i < ds->info->keymap_size; i++) { + err = irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_event_input_irq_handler, + req_flags, "gpio_keys", &ds->key_state[i]); + if (err) { + pr_err("gpio_event_input_request_irqs: request_irq " + "failed for input %d, irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_request_irq_failed; + } + enable_irq_wake(irq); + } + return 0; + + for (i = ds->info->keymap_size - 1; i >= 0; i--) { + free_irq(gpio_to_irq(ds->info->keymap[i].gpio), + &ds->key_state[i]); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + unsigned long 
irqflags; + struct gpio_event_input_info *di; + struct gpio_input_state *ds = *data; + + di = container_of(info, struct gpio_event_input_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND) { + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + disable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_cancel(&ds->timer); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + enable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (ktime_to_ns(di->poll_time) <= 0) + di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC); + + *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) * + di->keymap_size, GFP_KERNEL); + if (ds == NULL) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate private data\n"); + goto err_ds_alloc_failed; + } + ds->debounce_count = di->keymap_size; + ds->input_devs = input_devs; + ds->info = di; + wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input"); + spin_lock_init(&ds->irq_lock); + + for (i = 0; i < di->keymap_size; i++) { + int dev = di->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_input_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + di->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], di->type, + di->keymap[i].code); + ds->key_state[i].ds = ds; + ds->key_state[i].debounce = DEBOUNCE_UNKNOWN; + } + + for (i = 0; i < di->keymap_size; i++) { + ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in"); + if (ret) { + pr_err("gpio_event_input_func: gpio_request " + "failed for %d\n", di->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_input(di->keymap[i].gpio); + if (ret) { + pr_err("gpio_event_input_func: " + "gpio_direction_input failed for %d\n", + di->keymap[i].gpio); + goto err_gpio_configure_failed; + } + } + + ret = gpio_event_input_request_irqs(ds); + + spin_lock_irqsave(&ds->irq_lock, irqflags); + ds->use_irq = ret == 0; + + pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s " + "mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + ret == 0 ? "interrupt" : "polling"); + + hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ds->timer.function = gpio_event_input_timer_func; + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + ret = 0; + spin_lock_irqsave(&ds->irq_lock, irqflags); + hrtimer_cancel(&ds->timer); + if (ds->use_irq) { + for (i = di->keymap_size - 1; i >= 0; i--) { + free_irq(gpio_to_irq(di->keymap[i].gpio), + &ds->key_state[i]); + } + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + for (i = di->keymap_size - 1; i >= 0; i--) { +err_gpio_configure_failed: + gpio_free(di->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + wake_lock_destroy(&ds->wake_lock); + kfree(ds); +err_ds_alloc_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c new file mode 100644 index 000000000000..227eb8fe3c09 --- /dev/null +++ b/drivers/input/misc/gpio_matrix.c @@ -0,0 +1,432 @@ +/* drivers/input/misc/gpio_matrix.c + * + * Copyright (C) 2007 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/gpio.h> +#include <linux/gpio_event.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/wakelock.h> + +struct gpio_kp { + struct gpio_event_input_devs *input_devs; + struct gpio_event_matrix_info *keypad_info; + struct hrtimer timer; + struct wake_lock wake_lock; + int current_output; + unsigned int use_irq:1; + unsigned int key_state_changed:1; + unsigned int last_key_state_changed:1; + unsigned int some_keys_pressed:2; + unsigned int disabled_irq:1; + unsigned long keys_pressed[0]; +}; + +static void clear_phantom_key(struct gpio_kp *kp, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int key_index = out * mi->ninputs + in; + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + __clear_bit(key_index, kp->keys_pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "not cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + } +} + +static int restore_keys_for_input(struct gpio_kp *kp, int out, int in) +{ + int rv = 0; + int key_index; + + key_index = out * kp->keypad_info->ninputs + in; + while (out < kp->keypad_info->noutputs) { + if (test_bit(key_index, kp->keys_pressed)) { + rv = 1; + clear_phantom_key(kp, out, in); + } + key_index += kp->keypad_info->ninputs; + out++; + } + return rv; +} + +static void remove_phantom_keys(struct gpio_kp *kp) +{ + int out, in, inp; + int key_index; + + if (kp->some_keys_pressed < 3) + return; + + for (out = 0; out < kp->keypad_info->noutputs; out++) { + inp = -1; + key_index = out * kp->keypad_info->ninputs; + for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) { + if (test_bit(key_index, kp->keys_pressed)) { + if (inp == -1) { + inp = in; + continue; + } + if (inp >= 0) { + if (!restore_keys_for_input(kp, out + 1, + inp)) + break; + clear_phantom_key(kp, out, inp); + inp = -2; + } + restore_keys_for_input(kp, out, in); + } + } + } +} + +static void report_key(struct gpio_kp *kp, int key_index, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int pressed = test_bit(key_index, kp->keys_pressed); + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (keycode == KEY_RESERVED) { + if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS) + pr_info("gpiomatrix: unmapped key, %d-%d " + "(%d-%d) changed to %d\n", + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS) + 
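+ /* a mapped key whose scanned state differs from the input core's
+ * view: log the transition here when requested, then forward it
+ * as a key event */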
pr_info("gpiomatrix: key %x, %d-%d (%d-%d) " + "changed to %d\n", keycode, + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + input_report_key(kp->input_devs->dev[dev], keycode, pressed); + } + } +} + +static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer) +{ + int out, in; + int key_index; + int gpio; + struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer); + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH); + + out = kp->current_output; + if (out == mi->noutputs) { + out = 0; + kp->last_key_state_changed = kp->key_state_changed; + kp->key_state_changed = 0; + kp->some_keys_pressed = 0; + } else { + key_index = out * mi->ninputs; + for (in = 0; in < mi->ninputs; in++, key_index++) { + gpio = mi->input_gpios[in]; + if (gpio_get_value(gpio) ^ !polarity) { + if (kp->some_keys_pressed < 3) + kp->some_keys_pressed++; + kp->key_state_changed |= !__test_and_set_bit( + key_index, kp->keys_pressed); + } else + kp->key_state_changed |= __test_and_clear_bit( + key_index, kp->keys_pressed); + } + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, !polarity); + else + gpio_direction_input(gpio); + out++; + } + kp->current_output = out; + if (out < mi->noutputs) { + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, polarity); + else + gpio_direction_output(gpio, polarity); + hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) { + if (kp->key_state_changed) { + hrtimer_start(&kp->timer, mi->debounce_delay, + HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + kp->key_state_changed = kp->last_key_state_changed; + } + if (kp->key_state_changed) { + if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS) + remove_phantom_keys(kp); + key_index = 0; + for (out = 0; out < mi->noutputs; out++) + for (in = 0; in < mi->ninputs; in++, key_index++) + report_key(kp, key_index, out, in); + } + if (!kp->use_irq || kp->some_keys_pressed) { + hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + + /* No keys are pressed, reenable interrupt */ + for (out = 0; out < mi->noutputs; out++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[out], polarity); + else + gpio_direction_output(mi->output_gpios[out], polarity); + } + for (in = 0; in < mi->ninputs; in++) + enable_irq(gpio_to_irq(mi->input_gpios[in])); + wake_unlock(&kp->wake_lock); + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id) +{ + int i; + struct gpio_kp *kp = dev_id; + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + + if (!kp->use_irq) { + /* ignore interrupt while registering the handler */ + kp->disabled_irq = 1; + disable_irq_nosync(irq_in); + return IRQ_HANDLED; + } + + for (i = 0; i < mi->ninputs; i++) + disable_irq_nosync(gpio_to_irq(mi->input_gpios[i])); + for (i = 0; i < mi->noutputs; i++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[i], + !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH)); + else + gpio_direction_input(mi->output_gpios[i]); + } + wake_lock(&kp->wake_lock); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + return IRQ_HANDLED; +} + +static int gpio_keypad_request_irqs(struct 
gpio_kp *kp) +{ + int i; + int err; + unsigned int irq; + unsigned long request_flags; + struct gpio_event_matrix_info *mi = kp->keypad_info; + + switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) { + default: + request_flags = IRQF_TRIGGER_FALLING; + break; + case GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_RISING; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ: + request_flags = IRQF_TRIGGER_LOW; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_HIGH; + break; + } + + for (i = 0; i < mi->ninputs; i++) { + err = irq = gpio_to_irq(mi->input_gpios[i]); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_keypad_irq_handler, request_flags, + "gpio_kp", kp); + if (err) { + pr_err("gpiomatrix: request_irq failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + goto err_request_irq_failed; + } + err = set_irq_wake(irq, 1); + if (err) { + pr_err("gpiomatrix: set_irq_wake failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + } + disable_irq(irq); + if (kp->disabled_irq) { + kp->disabled_irq = 0; + enable_irq(irq); + } + } + return 0; + + for (i = mi->noutputs - 1; i >= 0; i--) { + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int i; + int err; + int key_count; + struct gpio_kp *kp; + struct gpio_event_matrix_info *mi; + + mi = container_of(info, struct gpio_event_matrix_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) { + /* TODO: disable scanning */ + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (mi->keymap == NULL || + mi->input_gpios == NULL || + mi->output_gpios == NULL) { + err = -ENODEV; + pr_err("gpiomatrix: Incomplete pdata\n"); + goto err_invalid_platform_data; + } + key_count = mi->ninputs * mi->noutputs; + + *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) * + BITS_TO_LONGS(key_count), GFP_KERNEL); + if (kp == NULL) { + err = -ENOMEM; + pr_err("gpiomatrix: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + kp->input_devs = input_devs; + kp->keypad_info = mi; + for (i = 0; i < key_count; i++) { + unsigned short keyentry = mi->keymap[i]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + if (dev >= input_devs->count) { + pr_err("gpiomatrix: bad device index %d >= " + "%d for key code %d\n", + dev, input_devs->count, keycode); + err = -EINVAL; + goto err_bad_keymap; + } + if (keycode && keycode <= KEY_MAX) + input_set_capability(input_devs->dev[dev], + EV_KEY, keycode); + } + + for (i = 0; i < mi->noutputs; i++) { + err = gpio_request(mi->output_gpios[i], "gpio_kp_out"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "output %d\n", mi->output_gpios[i]); + goto err_request_output_gpio_failed; + } + if (gpio_cansleep(mi->output_gpios[i])) { + pr_err("gpiomatrix: unsupported output gpio %d," + " can sleep\n", mi->output_gpios[i]); + err = -EINVAL; + goto err_output_gpio_configure_failed; + } + if (mi->flags & GPIOKPF_DRIVE_INACTIVE) + err = gpio_direction_output(mi->output_gpios[i], + !(mi->flags & GPIOKPF_ACTIVE_HIGH)); + else + err = gpio_direction_input(mi->output_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_configure failed for " + "output %d\n", mi->output_gpios[i]); + goto 
err_output_gpio_configure_failed; + } + } + for (i = 0; i < mi->ninputs; i++) { + err = gpio_request(mi->input_gpios[i], "gpio_kp_in"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "input %d\n", mi->input_gpios[i]); + goto err_request_input_gpio_failed; + } + err = gpio_direction_input(mi->input_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_direction_input failed" + " for input %d\n", mi->input_gpios[i]); + goto err_gpio_direction_input_failed; + } + } + kp->current_output = mi->noutputs; + kp->key_state_changed = 1; + + hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kp->timer.function = gpio_keypad_timer_func; + wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp"); + err = gpio_keypad_request_irqs(kp); + kp->use_irq = err == 0; + + pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for " + "%s%s in %s mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + kp->use_irq ? "interrupt" : "polling"); + + if (kp->use_irq) + wake_lock(&kp->wake_lock); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + + return 0; + } + + err = 0; + kp = *data; + + if (kp->use_irq) + for (i = mi->noutputs - 1; i >= 0; i--) + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); + + hrtimer_cancel(&kp->timer); + wake_lock_destroy(&kp->wake_lock); + for (i = mi->noutputs - 1; i >= 0; i--) { +err_gpio_direction_input_failed: + gpio_free(mi->input_gpios[i]); +err_request_input_gpio_failed: + ; + } + for (i = mi->noutputs - 1; i >= 0; i--) { +err_output_gpio_configure_failed: + gpio_free(mi->output_gpios[i]); +err_request_output_gpio_failed: + ; + } +err_bad_keymap: + kfree(kp); +err_kp_alloc_failed: +err_invalid_platform_data: + return err; +} diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c new file mode 100644 index 000000000000..2aac2fad0a17 --- /dev/null +++ b/drivers/input/misc/gpio_output.c @@ -0,0 +1,97 @@ +/* drivers/input/misc/gpio_output.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/gpio.h> +#include <linux/gpio_event.h> + +int gpio_event_output_event( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, unsigned int dev, unsigned int type, + unsigned int code, int value) +{ + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + if (type != oi->type) + return 0; + if (!(oi->flags & GPIOEDF_ACTIVE_HIGH)) + value = !value; + for (i = 0; i < oi->keymap_size; i++) + if (dev == oi->keymap[i].dev && code == oi->keymap[i].code) + gpio_set_value(oi->keymap[i].gpio, value); + return 0; +} + +int gpio_event_output_func( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, int func) +{ + int ret; + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) + return 0; + + if (func == GPIO_EVENT_FUNC_INIT) { + int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH); + + for (i = 0; i < oi->keymap_size; i++) { + int dev = oi->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_output_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + oi->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], oi->type, + oi->keymap[i].code); + } + + for (i = 0; i < oi->keymap_size; i++) { + ret = gpio_request(oi->keymap[i].gpio, + "gpio_event_output"); + if (ret) { + pr_err("gpio_event_output_func: gpio_request " + "failed for %d\n", oi->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_output(oi->keymap[i].gpio, + output_level); + if (ret) { + pr_err("gpio_event_output_func: " + "gpio_direction_output failed for %d\n", + oi->keymap[i].gpio); + goto err_gpio_direction_output_failed; + } + } + return 0; + } + + ret = 0; + for (i = oi->keymap_size - 1; i >= 0; i--) { +err_gpio_direction_output_failed: + gpio_free(oi->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + return ret; +} + diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c new file mode 100644 index 000000000000..ca23905f3046 --- /dev/null +++ b/drivers/input/misc/keychord.c @@ -0,0 +1,387 @@ +/* + * drivers/input/misc/keychord.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/smp_lock.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/keychord.h> +#include <linux/sched.h> + +#define KEYCHORD_NAME "keychord" +#define BUFFER_SIZE 16 + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("Key chord input driver"); +MODULE_SUPPORTED_DEVICE("keychord"); +MODULE_LICENSE("GPL"); + +#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \ + ((char *)kc + sizeof(struct input_keychord) + \ + kc->count * sizeof(kc->keycodes[0]))) + +struct keychord_device { + struct input_handler input_handler; + int registered; + + /* list of keychords to monitor */ + struct input_keychord *keychords; + int keychord_count; + + /* bitmask of keys contained in our keychords */ + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + /* current state of the keys */ + unsigned long keystate[BITS_TO_LONGS(KEY_CNT)]; + /* number of keys that are currently pressed */ + int key_down; + + /* second input_device_id is needed for null termination */ + struct input_device_id device_ids[2]; + + spinlock_t lock; + wait_queue_head_t waitq; + unsigned char head; + unsigned char tail; + __u16 buff[BUFFER_SIZE]; +}; + +static int check_keychord(struct keychord_device *kdev, + struct input_keychord *keychord) +{ + int i; + + if (keychord->count != kdev->key_down) + return 0; + + for (i = 0; i < keychord->count; i++) { + if (!test_bit(keychord->keycodes[i], kdev->keystate)) + return 0; + } + + /* we have a match */ + return 1; +} + +static void keychord_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + struct keychord_device *kdev = handle->private; + struct input_keychord *keychord; + unsigned long flags; + int i, got_chord = 0; + + if (type != EV_KEY || code >= KEY_MAX) + return; + + spin_lock_irqsave(&kdev->lock, flags); + /* do nothing if key state did not change */ + if (!test_bit(code, kdev->keystate) == !value) + goto done; + __change_bit(code, kdev->keystate); + if (value) + kdev->key_down++; + else + kdev->key_down--; + + /* don't notify on key up */ + if (!value) + goto done; + /* ignore this event if it is not one of the keys we are monitoring */ + if (!test_bit(code, kdev->keybit)) + goto done; + + keychord = kdev->keychords; + if (!keychord) + goto done; + + /* check to see if the keyboard state matches any keychords */ + for (i = 0; i < kdev->keychord_count; i++) { + if (check_keychord(kdev, keychord)) { + kdev->buff[kdev->head] = keychord->id; + kdev->head = (kdev->head + 1) % BUFFER_SIZE; + got_chord = 1; + break; + } + /* skip to next keychord */ + keychord = NEXT_KEYCHORD(keychord); + } + +done: + spin_unlock_irqrestore(&kdev->lock, flags); + + if (got_chord) + wake_up_interruptible(&kdev->waitq); +} + +static int keychord_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i, ret; + struct input_handle *handle; + struct keychord_device *kdev = + container_of(handler, struct keychord_device, input_handler); + + /* + * ignore this input device if it does not contain any keycodes + * that we are monitoring + */ + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = KEYCHORD_NAME; + 
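+ /* only devices that report at least one monitored keycode get this
+ * far (the scan above returns -ENODEV otherwise); stash the
+ * keychord_device in the handle so keychord_event() can recover it */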
handle->private = kdev; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + pr_info("keychord: using input dev %s for fevent\n", dev->name); + + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keychord_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +/* + * keychord_read is used to read keychord events from the driver + */ +static ssize_t keychord_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + __u16 id; + int retval; + unsigned long flags; + + if (count < sizeof(id)) + return -EINVAL; + count = sizeof(id); + + if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + retval = wait_event_interruptible(kdev->waitq, + kdev->head != kdev->tail); + if (retval) + return retval; + + spin_lock_irqsave(&kdev->lock, flags); + /* pop a keychord ID off the queue */ + id = kdev->buff[kdev->tail]; + kdev->tail = (kdev->tail + 1) % BUFFER_SIZE; + spin_unlock_irqrestore(&kdev->lock, flags); + + if (copy_to_user(buffer, &id, count)) + return -EFAULT; + + return count; +} + +/* + * keychord_write is used to configure the driver + */ +static ssize_t keychord_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + struct input_keychord *keychords = 0; + struct input_keychord *keychord, *next, *end; + int ret, i, key; + unsigned long flags; + + if (count < sizeof(struct input_keychord)) + return -EINVAL; + keychords = kzalloc(count, GFP_KERNEL); + if (!keychords) + return -ENOMEM; + + /* read list of keychords from userspace */ + if (copy_from_user(keychords, buffer, count)) { + kfree(keychords); + return -EFAULT; + } + + /* unregister handler before changing configuration */ + if (kdev->registered) { + input_unregister_handler(&kdev->input_handler); + kdev->registered = 0; + } + + spin_lock_irqsave(&kdev->lock, flags); + /* clear any existing configuration */ + kfree(kdev->keychords); + kdev->keychords = 0; + kdev->keychord_count = 0; + kdev->key_down = 0; + memset(kdev->keybit, 0, sizeof(kdev->keybit)); + memset(kdev->keystate, 0, sizeof(kdev->keystate)); + kdev->head = kdev->tail = 0; + + keychord = keychords; + end = (struct input_keychord *)((char *)keychord + count); + + while (keychord < end) { + next = NEXT_KEYCHORD(keychord); + if (keychord->count <= 0 || next > end) { + pr_err("keychord: invalid keycode count %d\n", + keychord->count); + goto err_unlock_return; + } + if (keychord->version != KEYCHORD_VERSION) { + pr_err("keychord: unsupported version %d\n", + keychord->version); + goto err_unlock_return; + } + + /* keep track of the keys we are monitoring in keybit */ + for (i = 0; i < keychord->count; i++) { + key = keychord->keycodes[i]; + if (key < 0 || key >= KEY_CNT) { + pr_err("keychord: keycode %d out of range\n", + key); + goto err_unlock_return; + } + __set_bit(key, kdev->keybit); + } + + kdev->keychord_count++; + keychord = next; + } + + kdev->keychords = keychords; + spin_unlock_irqrestore(&kdev->lock, flags); + + ret = input_register_handler(&kdev->input_handler); + if (ret) { + kfree(keychords); + kdev->keychords = 0; + return ret; + } + kdev->registered = 1; + + return count; + +err_unlock_return: + 
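+ /* the buffer did not hold a valid packed list of struct input_keychord
+ * records (each header immediately followed by its keycodes[] array):
+ * unlock, free the copy and fail the write; the previous configuration
+ * was already released above, so the device is left unconfigured until
+ * a valid write arrives */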
spin_unlock_irqrestore(&kdev->lock, flags); + kfree(keychords); + return -EINVAL; +} + +static unsigned int keychord_poll(struct file *file, poll_table *wait) +{ + struct keychord_device *kdev = file->private_data; + + poll_wait(file, &kdev->waitq, wait); + + if (kdev->head != kdev->tail) + return POLLIN | POLLRDNORM; + + return 0; +} + +static int keychord_open(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev; + + kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL); + if (!kdev) + return -ENOMEM; + + spin_lock_init(&kdev->lock); + init_waitqueue_head(&kdev->waitq); + + kdev->input_handler.event = keychord_event; + kdev->input_handler.connect = keychord_connect; + kdev->input_handler.disconnect = keychord_disconnect; + kdev->input_handler.name = KEYCHORD_NAME; + kdev->input_handler.id_table = kdev->device_ids; + + kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT; + __set_bit(EV_KEY, kdev->device_ids[0].evbit); + + file->private_data = kdev; + + return 0; +} + +static int keychord_release(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev = file->private_data; + + if (kdev->registered) + input_unregister_handler(&kdev->input_handler); + kfree(kdev); + + return 0; +} + +static const struct file_operations keychord_fops = { + .owner = THIS_MODULE, + .open = keychord_open, + .release = keychord_release, + .read = keychord_read, + .write = keychord_write, + .poll = keychord_poll, +}; + +static struct miscdevice keychord_misc = { + .fops = &keychord_fops, + .name = KEYCHORD_NAME, + .minor = MISC_DYNAMIC_MINOR, +}; + +static int __init keychord_init(void) +{ + return misc_register(&keychord_misc); +} + +static void __exit keychord_exit(void) +{ + misc_deregister(&keychord_misc); +} + +module_init(keychord_init); +module_exit(keychord_exit); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index cab10a00e414..d50fd5603604 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -341,6 +341,12 @@ config TOUCHSCREEN_PANJIT_I2C To compile this driver as a module, choose M here: the module will be called panjit_i2c. +config TOUCHSCREEN_SYNAPTICS_I2C_RMI + tristate "Synaptics i2c touchscreen" + depends on I2C + help + This enables support for Synaptics RMI over I2C based touchscreens. 
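+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_i2c_rmi.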
+ config TOUCHSCREEN_TOUCHRIGHT tristate "Touchright serial touchscreen" select SERIO diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index a8c08aa6ea66..93e641bc320e 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o +obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI) += synaptics_i2c_rmi.o obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o diff --git a/drivers/input/touchscreen/panjit_i2c.c b/drivers/input/touchscreen/panjit_i2c.c index 16df9313a10f..4fccebc52ea8 100644 --- a/drivers/input/touchscreen/panjit_i2c.c +++ b/drivers/input/touchscreen/panjit_i2c.c @@ -24,6 +24,7 @@ #include <linux/device.h> #include <linux/input.h> #include <linux/delay.h> +#include <linux/earlysuspend.h> #include <linux/i2c.h> #include <linux/i2c/panjit_ts.h> #include <linux/interrupt.h> @@ -38,10 +39,16 @@ #define DRIVER_NAME "panjit_touch" +#ifdef CONFIG_HAS_EARLYSUSPEND +static void pj_early_suspend(struct early_suspend *h); +static void pj_late_resume(struct early_suspend *h); +#endif + struct pj_data { struct input_dev *input_dev; struct i2c_client *client; int gpio_reset; + struct early_suspend early_suspend; }; struct pj_event { @@ -208,6 +215,12 @@ static int pj_probe(struct i2c_client *client, goto fail_irq; } +#ifdef CONFIG_HAS_EARLYSUSPEND + touch->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + touch->early_suspend.suspend = pj_early_suspend; + touch->early_suspend.resume = pj_late_resume; + register_early_suspend(&touch->early_suspend); +#endif dev_info(&client->dev, "%s: initialized\n", __func__); return 0; @@ -267,6 +280,26 @@ static int pj_resume(struct i2c_client *client) return 0; } +#ifdef CONFIG_HAS_EARLYSUSPEND +static void pj_early_suspend(struct early_suspend *es) +{ + struct pj_data *touch; + touch = container_of(es, struct pj_data, early_suspend); + + if (pj_suspend(touch->client, PMSG_SUSPEND) != 0) + dev_err(&touch->client->dev, "%s: failed\n", __func__); +} + +static void pj_late_resume(struct early_suspend *es) +{ + struct pj_data *touch; + touch = container_of(es, struct pj_data, early_suspend); + + if (pj_resume(touch->client) != 0) + dev_err(&touch->client->dev, "%s: failed\n", __func__); +} +#endif + static int pj_remove(struct i2c_client *client) { struct pj_data *touch = i2c_get_clientdata(client); @@ -274,6 +307,9 @@ static int pj_remove(struct i2c_client *client) if (!touch) return -EINVAL; +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&touch->early_suspend); +#endif free_irq(touch->client->irq, touch); if (touch->gpio_reset >= 0) gpio_free(touch->gpio_reset); @@ -291,8 +327,10 @@ static const struct i2c_device_id panjit_ts_id[] = { static struct i2c_driver panjit_driver = { .probe = pj_probe, .remove = pj_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND .suspend = pj_suspend, .resume = pj_resume, +#endif .id_table = panjit_ts_id, .driver = { .name = DRIVER_NAME, diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c new file mode 100644 index 000000000000..5729602cbb63 --- /dev/null +++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c @@ -0,0 +1,675 @@ +/* drivers/input/keyboard/synaptics_i2c_rmi.c + * + * Copyright (C) 
2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/earlysuspend.h> +#include <linux/hrtimer.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/synaptics_i2c_rmi.h> + +static struct workqueue_struct *synaptics_wq; + +struct synaptics_ts_data { + uint16_t addr; + struct i2c_client *client; + struct input_dev *input_dev; + int use_irq; + bool has_relative_report; + struct hrtimer timer; + struct work_struct work; + uint16_t max[2]; + int snap_state[2][2]; + int snap_down_on[2]; + int snap_down_off[2]; + int snap_up_on[2]; + int snap_up_off[2]; + int snap_down[2]; + int snap_up[2]; + uint32_t flags; + int reported_finger_count; + int8_t sensitivity_adjust; + int (*power)(int on); + struct early_suspend early_suspend; +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void synaptics_ts_early_suspend(struct early_suspend *h); +static void synaptics_ts_late_resume(struct early_suspend *h); +#endif + +static int synaptics_init_panel(struct synaptics_ts_data *ts) +{ + int ret; + + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + goto err_page_select_failed; + } + ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */ + if (ret < 0) + printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n"); + + ret = i2c_smbus_write_byte_data(ts->client, 0x44, + ts->sensitivity_adjust); + if (ret < 0) + pr_err("synaptics_ts: failed to set Sensitivity Adjust\n"); + +err_page_select_failed: + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */ + if (ret < 0) + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */ + if (ret < 0) + printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n"); + return ret; +} + +static void synaptics_ts_work_func(struct work_struct *work) +{ + int i; + int ret; + int bad_data = 0; + struct i2c_msg msg[2]; + uint8_t start_reg; + uint8_t buf[15]; + struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work); + int buf_len = ts->has_relative_report ? 15 : 13; + + msg[0].addr = ts->client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = &start_reg; + start_reg = 0x00; + msg[1].addr = ts->client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = buf_len; + msg[1].buf = buf; + + /* printk("synaptics_ts_work_func\n"); */ + for (i = 0; i < ((ts->use_irq && !bad_data) ? 
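+ /* one read per interrupt; retry up to ten times when polling or
+ * after a bad read */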
1 : 10); i++) { + ret = i2c_transfer(ts->client->adapter, msg, 2); + if (ret < 0) { + printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n"); + bad_data = 1; + } else { + /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */ + /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */ + /* buf[0], buf[1], buf[2], buf[3], */ + /* buf[4], buf[5], buf[6], buf[7], */ + /* buf[8], buf[9], buf[10], buf[11], */ + /* buf[12], buf[13], buf[14], ret); */ + if ((buf[buf_len - 1] & 0xc0) != 0x40) { + printk(KERN_WARNING "synaptics_ts_work_func:" + " bad read %x %x %x %x %x %x %x %x %x" + " %x %x %x %x %x %x, ret %d\n", + buf[0], buf[1], buf[2], buf[3], + buf[4], buf[5], buf[6], buf[7], + buf[8], buf[9], buf[10], buf[11], + buf[12], buf[13], buf[14], ret); + if (bad_data) + synaptics_init_panel(ts); + bad_data = 1; + continue; + } + bad_data = 0; + if ((buf[buf_len - 1] & 1) == 0) { + /* printk("read %d coordinates\n", i); */ + break; + } else { + int pos[2][2]; + int f, a; + int base; + /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */ + /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */ + int z = buf[1]; + int w = buf[0] >> 4; + int finger = buf[0] & 7; + + /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */ + /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */ + /* int z2 = buf[1+6]; */ + /* int w2 = buf[0+6] >> 4; */ + /* int finger2 = buf[0+6] & 7; */ + + /* int dx = (int8_t)buf[12]; */ + /* int dy = (int8_t)buf[13]; */ + int finger2_pressed; + + /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */ + /* x, y, z, w, finger, */ + /* x2, y2, z2, w2, finger2, */ + /* dx, dy); */ + + base = 2; + for (f = 0; f < 2; f++) { + uint32_t flip_flag = SYNAPTICS_FLIP_X; + for (a = 0; a < 2; a++) { + int p = buf[base + 1]; + p |= (uint16_t)(buf[base] & 0x1f) << 8; + if (ts->flags & flip_flag) + p = ts->max[a] - p; + if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) { + if (ts->snap_state[f][a]) { + if (p <= ts->snap_down_off[a]) + p = ts->snap_down[a]; + else if (p >= ts->snap_up_off[a]) + p = ts->snap_up[a]; + else + ts->snap_state[f][a] = 0; + } else { + if (p <= ts->snap_down_on[a]) { + p = ts->snap_down[a]; + ts->snap_state[f][a] = 1; + } else if (p >= ts->snap_up_on[a]) { + p = ts->snap_up[a]; + ts->snap_state[f][a] = 1; + } + } + } + pos[f][a] = p; + base += 2; + flip_flag <<= 1; + } + base += 2; + if (ts->flags & SYNAPTICS_SWAP_XY) + swap(pos[f][0], pos[f][1]); + } + if (z) { + input_report_abs(ts->input_dev, ABS_X, pos[0][0]); + input_report_abs(ts->input_dev, ABS_Y, pos[0][1]); + } + input_report_abs(ts->input_dev, ABS_PRESSURE, z); + input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w); + input_report_key(ts->input_dev, BTN_TOUCH, finger); + finger2_pressed = finger > 1 && finger != 7; + input_report_key(ts->input_dev, BTN_2, finger2_pressed); + if (finger2_pressed) { + input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]); + input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]); + } + + if (!finger) + z = 0; + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]); + input_mt_sync(ts->input_dev); + if (finger2_pressed) { + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]); + input_report_abs(ts->input_dev, 
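+ /* type-A multitouch: contacts are anonymous, each one is a run of
+ * ABS_MT_* values closed by input_mt_sync(); a zeroed packet is sent
+ * below when a previously reported second finger lifts */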
ABS_MT_POSITION_Y, pos[1][1]); + input_mt_sync(ts->input_dev); + } else if (ts->reported_finger_count > 1) { + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0); + input_mt_sync(ts->input_dev); + } + ts->reported_finger_count = finger; + input_sync(ts->input_dev); + } + } + } + if (ts->use_irq) + enable_irq(ts->client->irq); +} + +static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer) +{ + struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer); + /* printk("synaptics_ts_timer_func\n"); */ + + queue_work(synaptics_wq, &ts->work); + + hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id) +{ + struct synaptics_ts_data *ts = dev_id; + + /* printk("synaptics_ts_irq_handler\n"); */ + disable_irq_nosync(ts->client->irq); + queue_work(synaptics_wq, &ts->work); + return IRQ_HANDLED; +} + +static int synaptics_ts_probe( + struct i2c_client *client, const struct i2c_device_id *id) +{ + struct synaptics_ts_data *ts; + uint8_t buf0[4]; + uint8_t buf1[8]; + struct i2c_msg msg[2]; + int ret = 0; + uint16_t max_x, max_y; + int fuzz_x, fuzz_y, fuzz_p, fuzz_w; + struct synaptics_i2c_rmi_platform_data *pdata; + unsigned long irqflags; + int inactive_area_left; + int inactive_area_right; + int inactive_area_top; + int inactive_area_bottom; + int snap_left_on; + int snap_left_off; + int snap_right_on; + int snap_right_off; + int snap_top_on; + int snap_top_off; + int snap_bottom_on; + int snap_bottom_off; + uint32_t panel_version; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n"); + ret = -ENODEV; + goto err_check_functionality_failed; + } + + ts = kzalloc(sizeof(*ts), GFP_KERNEL); + if (ts == NULL) { + ret = -ENOMEM; + goto err_alloc_data_failed; + } + INIT_WORK(&ts->work, synaptics_ts_work_func); + ts->client = client; + i2c_set_clientdata(client, ts); + pdata = client->dev.platform_data; + if (pdata) + ts->power = pdata->power; + if (ts->power) { + ret = ts->power(1); + if (ret < 0) { + printk(KERN_ERR "synaptics_ts_probe power on failed\n"); + goto err_power_failed; + } + } + + ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed\n"); + /* fail? 
*/ + } + { + int retry = 10; + while (retry-- > 0) { + ret = i2c_smbus_read_byte_data(ts->client, 0xe4); + if (ret >= 0) + break; + msleep(100); + } + } + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret); + panel_version = ret << 8; + ret = i2c_smbus_read_byte_data(ts->client, 0xe5); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret); + panel_version |= ret; + + ret = i2c_smbus_read_byte_data(ts->client, 0xe3); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret); + + if (pdata) { + while (pdata->version > panel_version) + pdata++; + ts->flags = pdata->flags; + ts->sensitivity_adjust = pdata->sensitivity_adjust; + irqflags = pdata->irqflags; + inactive_area_left = pdata->inactive_left; + inactive_area_right = pdata->inactive_right; + inactive_area_top = pdata->inactive_top; + inactive_area_bottom = pdata->inactive_bottom; + snap_left_on = pdata->snap_left_on; + snap_left_off = pdata->snap_left_off; + snap_right_on = pdata->snap_right_on; + snap_right_off = pdata->snap_right_off; + snap_top_on = pdata->snap_top_on; + snap_top_off = pdata->snap_top_off; + snap_bottom_on = pdata->snap_bottom_on; + snap_bottom_off = pdata->snap_bottom_off; + fuzz_x = pdata->fuzz_x; + fuzz_y = pdata->fuzz_y; + fuzz_p = pdata->fuzz_p; + fuzz_w = pdata->fuzz_w; + } else { + irqflags = 0; + inactive_area_left = 0; + inactive_area_right = 0; + inactive_area_top = 0; + inactive_area_bottom = 0; + snap_left_on = 0; + snap_left_off = 0; + snap_right_on = 0; + snap_right_off = 0; + snap_top_on = 0; + snap_top_off = 0; + snap_bottom_on = 0; + snap_bottom_off = 0; + fuzz_x = 0; + fuzz_y = 0; + fuzz_p = 0; + fuzz_w = 0; + } + + ret = i2c_smbus_read_byte_data(ts->client, 0xf0); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret); + + ret = i2c_smbus_read_byte_data(ts->client, 0xf1); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret); + + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed\n"); + goto err_detect_failed; + } + + msg[0].addr = ts->client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = buf0; + buf0[0] = 0xe0; + msg[1].addr = ts->client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = 8; + msg[1].buf = buf1; + ret = i2c_transfer(ts->client->adapter, msg, 2); + if (ret < 0) { + printk(KERN_ERR "i2c_transfer failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n", + buf1[0], buf1[1], buf1[2], buf1[3], + buf1[4], buf1[5], buf1[6], buf1[7]); + + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + goto err_detect_failed; + } + ret = i2c_smbus_read_word_data(ts->client, 0x02); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->has_relative_report = !(ret & 0x100); + 
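+ /* bit 8 of the sensor-property word selects the short report format;
+ * when it is clear the device also sends the two relative-motion
+ * bytes, so synaptics_ts_work_func() reads 15 bytes instead of 13 */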
printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret); + ret = i2c_smbus_read_word_data(ts->client, 0x04); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8); + ret = i2c_smbus_read_word_data(ts->client, 0x06); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8); + if (ts->flags & SYNAPTICS_SWAP_XY) + swap(max_x, max_y); + + ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */ + if (ret < 0) { + printk(KERN_ERR "synaptics_init_panel failed\n"); + goto err_detect_failed; + } + + ts->input_dev = input_allocate_device(); + if (ts->input_dev == NULL) { + ret = -ENOMEM; + printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + ts->input_dev->name = "synaptics-rmi-touchscreen"; + set_bit(EV_SYN, ts->input_dev->evbit); + set_bit(EV_KEY, ts->input_dev->evbit); + set_bit(BTN_TOUCH, ts->input_dev->keybit); + set_bit(BTN_2, ts->input_dev->keybit); + set_bit(EV_ABS, ts->input_dev->evbit); + inactive_area_left = inactive_area_left * max_x / 0x10000; + inactive_area_right = inactive_area_right * max_x / 0x10000; + inactive_area_top = inactive_area_top * max_y / 0x10000; + inactive_area_bottom = inactive_area_bottom * max_y / 0x10000; + snap_left_on = snap_left_on * max_x / 0x10000; + snap_left_off = snap_left_off * max_x / 0x10000; + snap_right_on = snap_right_on * max_x / 0x10000; + snap_right_off = snap_right_off * max_x / 0x10000; + snap_top_on = snap_top_on * max_y / 0x10000; + snap_top_off = snap_top_off * max_y / 0x10000; + snap_bottom_on = snap_bottom_on * max_y / 0x10000; + snap_bottom_off = snap_bottom_off * max_y / 0x10000; + fuzz_x = fuzz_x * max_x / 0x10000; + fuzz_y = fuzz_y * max_y / 0x10000; + ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left; + ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right; + ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top; + ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom; + ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on; + ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off; + ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on; + ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off; + ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on; + ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off; + ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on; + ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off; + printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y); + printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n", + inactive_area_left, inactive_area_right, + inactive_area_top, inactive_area_bottom); + printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n", + snap_left_on, snap_left_off, snap_right_on, snap_right_off, + snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off); + input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, 
ABS_PRESSURE, 0, 255, fuzz_p, 0); + input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0); + input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0); + input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0); + /* ts->input_dev->name = ts->keypad_info->name; */ + ret = input_register_device(ts->input_dev); + if (ret) { + printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name); + goto err_input_register_device_failed; + } + if (client->irq) { + ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts); + if (ret == 0) { + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */ + if (ret) + free_irq(client->irq, ts); + } + if (ret == 0) + ts->use_irq = 1; + else + dev_err(&client->dev, "request_irq failed\n"); + } + if (!ts->use_irq) { + hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ts->timer.function = synaptics_ts_timer_func; + hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + } +#ifdef CONFIG_HAS_EARLYSUSPEND + ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + ts->early_suspend.suspend = synaptics_ts_early_suspend; + ts->early_suspend.resume = synaptics_ts_late_resume; + register_early_suspend(&ts->early_suspend); +#endif + + printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? 
"interrupt" : "polling"); + + return 0; + +err_input_register_device_failed: + input_free_device(ts->input_dev); + +err_input_dev_alloc_failed: +err_detect_failed: +err_power_failed: + kfree(ts); +err_alloc_data_failed: +err_check_functionality_failed: + return ret; +} + +static int synaptics_ts_remove(struct i2c_client *client) +{ + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + unregister_early_suspend(&ts->early_suspend); + if (ts->use_irq) + free_irq(client->irq, ts); + else + hrtimer_cancel(&ts->timer); + input_unregister_device(ts->input_dev); + kfree(ts); + return 0; +} + +static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg) +{ + int ret; + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + + if (ts->use_irq) + disable_irq(client->irq); + else + hrtimer_cancel(&ts->timer); + ret = cancel_work_sync(&ts->work); + if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */ + enable_irq(client->irq); + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */ + if (ret < 0) + printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n"); + + ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */ + if (ret < 0) + printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n"); + if (ts->power) { + ret = ts->power(0); + if (ret < 0) + printk(KERN_ERR "synaptics_ts_resume power off failed\n"); + } + return 0; +} + +static int synaptics_ts_resume(struct i2c_client *client) +{ + int ret; + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + + if (ts->power) { + ret = ts->power(1); + if (ret < 0) + printk(KERN_ERR "synaptics_ts_resume power on failed\n"); + } + + synaptics_init_panel(ts); + + if (ts->use_irq) + enable_irq(client->irq); + + if (!ts->use_irq) + hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + else + i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */ + + return 0; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void synaptics_ts_early_suspend(struct early_suspend *h) +{ + struct synaptics_ts_data *ts; + ts = container_of(h, struct synaptics_ts_data, early_suspend); + synaptics_ts_suspend(ts->client, PMSG_SUSPEND); +} + +static void synaptics_ts_late_resume(struct early_suspend *h) +{ + struct synaptics_ts_data *ts; + ts = container_of(h, struct synaptics_ts_data, early_suspend); + synaptics_ts_resume(ts->client); +} +#endif + +static const struct i2c_device_id synaptics_ts_id[] = { + { SYNAPTICS_I2C_RMI_NAME, 0 }, + { } +}; + +static struct i2c_driver synaptics_ts_driver = { + .probe = synaptics_ts_probe, + .remove = synaptics_ts_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .suspend = synaptics_ts_suspend, + .resume = synaptics_ts_resume, +#endif + .id_table = synaptics_ts_id, + .driver = { + .name = SYNAPTICS_I2C_RMI_NAME, + }, +}; + +static int __devinit synaptics_ts_init(void) +{ + synaptics_wq = create_singlethread_workqueue("synaptics_wq"); + if (!synaptics_wq) + return -ENOMEM; + return i2c_add_driver(&synaptics_ts_driver); +} + +static void __exit synaptics_ts_exit(void) +{ + i2c_del_driver(&synaptics_ts_driver); + if (synaptics_wq) + destroy_workqueue(synaptics_wq); +} + +module_init(synaptics_ts_init); +module_exit(synaptics_ts_exit); + +MODULE_DESCRIPTION("Synaptics Touchscreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index e4112622e5a2..3845131ee555 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -373,6 +373,12 @@ config 
LEDS_TRIGGER_DEFAULT_ON This allows LEDs to be initialised in the ON state. If unsure, say Y. +config LEDS_TRIGGER_SLEEP + tristate "LED Sleep Mode Trigger" + depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND + help + This turns LEDs on when the screen is off but the cpu still running. + comment "iptables trigger is under Netfilter config (LED target)" depends on LEDS_TRIGGERS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 7d6b95831f8e..5fe6885c77a3 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -49,3 +49,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o +obj-$(CONFIG_LEDS_TRIGGER_SLEEP) += ledtrig-sleep.o diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c new file mode 100644 index 000000000000..f16404212152 --- /dev/null +++ b/drivers/leds/ledtrig-sleep.c @@ -0,0 +1,80 @@ +/* drivers/leds/ledtrig-sleep.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/earlysuspend.h> +#include <linux/leds.h> +#include <linux/suspend.h> + +static int ledtrig_sleep_pm_callback(struct notifier_block *nfb, + unsigned long action, + void *ignored); + +DEFINE_LED_TRIGGER(ledtrig_sleep) +static struct notifier_block ledtrig_sleep_pm_notifier = { + .notifier_call = ledtrig_sleep_pm_callback, + .priority = 0, +}; + +static void ledtrig_sleep_early_suspend(struct early_suspend *h) +{ + led_trigger_event(ledtrig_sleep, LED_FULL); +} + +static void ledtrig_sleep_early_resume(struct early_suspend *h) +{ + led_trigger_event(ledtrig_sleep, LED_OFF); +} + +static struct early_suspend ledtrig_sleep_early_suspend_handler = { + .suspend = ledtrig_sleep_early_suspend, + .resume = ledtrig_sleep_early_resume, +}; + +static int ledtrig_sleep_pm_callback(struct notifier_block *nfb, + unsigned long action, + void *ignored) +{ + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + led_trigger_event(ledtrig_sleep, LED_OFF); + return NOTIFY_OK; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + led_trigger_event(ledtrig_sleep, LED_FULL); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static int __init ledtrig_sleep_init(void) +{ + led_trigger_register_simple("sleep", &ledtrig_sleep); + register_pm_notifier(&ledtrig_sleep_pm_notifier); + register_early_suspend(&ledtrig_sleep_early_suspend_handler); + return 0; +} + +static void __exit ledtrig_sleep_exit(void) +{ + unregister_early_suspend(&ledtrig_sleep_early_suspend_handler); + unregister_pm_notifier(&ledtrig_sleep_pm_notifier); + led_trigger_unregister_simple(ledtrig_sleep); +} + +module_init(ledtrig_sleep_init); +module_exit(ledtrig_sleep_exit); + diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index b74331260744..0c273c04a306 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -53,6 +53,10 @@ config AD525X_DPOT_SPI To compile this driver as a module, choose M here: the module will be called ad525x_dpot-spi. 
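The "sleep" trigger registered by ledtrig-sleep.c above is selected per LED through the standard sysfs trigger attribute once LEDS_TRIGGER_SLEEP is built in. A minimal user-space sketch; the LED name "amber" and its sysfs path are illustrative, not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* pick any LED listed under /sys/class/leds; "amber" is a placeholder */
	FILE *f = fopen("/sys/class/leds/amber/trigger", "w");

	if (!f)
		return 1;
	fputs("sleep", f);	/* trigger name registered by ledtrig_sleep_init() */
	fclose(f);
	return 0;
}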
+config ANDROID_PMEM + bool "Android pmem allocator" + default y + config ATMEL_PWM tristate "Atmel AT32/AT91 PWM support" depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 @@ -199,6 +203,13 @@ config ENCLOSURE_SERVICES driver (SCSI/ATA) which supports enclosures or a SCSI enclosure device (SES) to use these services. +config KERNEL_DEBUGGER_CORE + bool "Kernel Debugger Core" + default n + ---help--- + Generic kernel debugging command processor used by low level + (interrupt context) platform-specific debuggers. + config SGI_XP tristate "Support communication between SGI SSIs" depends on NET @@ -321,6 +332,14 @@ config HMC6352 This driver provides support for the Honeywell HMC6352 compass, providing configuration and heading data via sysfs. +config SENSORS_AK8975 + tristate "AK8975 compass support" + default n + depends on I2C + help + If you say yes here you get support for Asahi Kasei's + orientation sensor AK8975. + config EP93XX_PWM tristate "EP93xx PWM support" depends on ARCH_EP93XX @@ -354,6 +373,10 @@ config TI_DAC7512 This driver can also be built as a module. If so, the module will be calles ti_dac7512. +config UID_STAT + bool "UID based statistics tracking exported to /proc/uid_stat" + default n + config VMWARE_BALLOON tristate "VMware Balloon Driver" depends on X86 @@ -390,6 +413,29 @@ config BMP085 To compile this driver as a module, choose M here: the module will be called bmp085. +config WL127X_RFKILL + tristate "Bluetooth power control driver for TI wl127x" + depends on RFKILL + default n + ---help--- + Creates an rfkill entry in sysfs for power control of Bluetooth + TI wl127x chips. + +config APANIC + bool "Android kernel panic diagnostics driver" + default n + ---help--- + Driver which handles kernel panics and attempts to write + critical debugging data to flash. + +config APANIC_PLABEL + string "Android panic dump flash partition label" + depends on APANIC + default "kpanic" + ---help--- + If your platform uses a different flash partition label for storing + crashdumps, enter it here. 
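Not part of the patch: with the APANIC options above enabled, a dump saved at panic time is exposed on the next boot as /proc/apanic_console and /proc/apanic_threads (created by drivers/misc/apanic.c later in this patch), and writing anything to either file erases the stored dump. A minimal userspace consumer might look like the sketch below; it assumes it runs as root early in boot, the destination path is invented, and error handling is kept to a minimum.

/* Illustrative userspace sketch (not from this patch): save the last
 * panic console log and then clear the crash partition. The output
 * path is made up for the example. */
#include <stdio.h>

int main(void)
{
	FILE *in = fopen("/proc/apanic_console", "r");
	FILE *out;
	char buf[4096];
	size_t n;

	if (!in)
		return 0;	/* no panic data from the previous boot */

	out = fopen("/data/last_kpanic.log", "w");	/* hypothetical destination */
	if (!out) {
		fclose(in);
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	fclose(out);
	fclose(in);

	/* any write to the proc entry makes the driver erase the dump */
	out = fopen("/proc/apanic_console", "w");
	if (out) {
		fputs("1\n", out);
		fclose(out);
	}
	return 0;
}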
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 42eab95cde2a..249ac2683aa5 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -16,8 +16,10 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o +obj-$(CONFIG_ANDROID_PMEM) += pmem.o obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o +obj-$(CONFIG_KERNEL_DEBUGGER_CORE) += kernel_debugger.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ @@ -28,6 +30,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o +obj-$(CONFIG_UID_STAT) += uid_stat.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ obj-$(CONFIG_HMC6352) += hmc6352.o @@ -35,3 +38,6 @@ obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o +obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o +obj-$(CONFIG_APANIC) += apanic.o +obj-$(CONFIG_SENSORS_AK8975) += akm8975.o diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c new file mode 100644 index 000000000000..830d2897afd6 --- /dev/null +++ b/drivers/misc/akm8975.c @@ -0,0 +1,732 @@ +/* drivers/misc/akm8975.c - akm8975 compass driver + * + * Copyright (C) 2007-2008 HTC Corporation. + * Author: Hou-Kun Chen <houkun.chen@gmail.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Revised by AKM 2009/04/02 + * Revised by Motorola 2010/05/27 + * + */ + +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/irq.h> +#include <linux/miscdevice.h> +#include <linux/gpio.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/workqueue.h> +#include <linux/freezer.h> +#include <linux/akm8975.h> +#include <linux/earlysuspend.h> + +#define AK8975DRV_CALL_DBG 0 +#if AK8975DRV_CALL_DBG +#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg); +#else +#define FUNCDBG(msg) +#endif + +#define AK8975DRV_DATA_DBG 0 +#define MAX_FAILURE_COUNT 10 + +struct akm8975_data { + struct i2c_client *this_client; + struct akm8975_platform_data *pdata; + struct input_dev *input_dev; + struct work_struct work; + struct mutex flags_lock; +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif +}; + +/* +* Because misc devices can not carry a pointer from driver register to +* open, we keep this global. This limits the driver to a single instance. 
+*/ +struct akm8975_data *akmd_data; + +static DECLARE_WAIT_QUEUE_HEAD(open_wq); + +static atomic_t open_flag; + +static short m_flag; +static short a_flag; +static short t_flag; +static short mv_flag; + +static short akmd_delay; + +static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client, + AK8975_REG_CNTL)); +} +static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + strict_strtoul(buf, 10, &val); + if (val > 0xff) + return -EINVAL; + i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val); + return count; +} +static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store); + +static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = akm->this_client->addr, + .flags = 0, + .len = 1, + .buf = buf, + }, + { + .addr = akm->this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = buf, + }, + }; + + FUNCDBG("called"); + + if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) { + pr_err("akm8975_i2c_rxdata: transfer error\n"); + return EIO; + } else + return 0; +} + +static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = akm->this_client->addr, + .flags = 0, + .len = length, + .buf = buf, + }, + }; + + FUNCDBG("called"); + + if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) { + pr_err("akm8975_i2c_txdata: transfer error\n"); + return -EIO; + } else + return 0; +} + +static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf) +{ + struct akm8975_data *data = i2c_get_clientdata(akm->this_client); + + FUNCDBG("called"); + +#if AK8975DRV_DATA_DBG + pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n", + rbuf[0], rbuf[1], rbuf[2]); + pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]); + pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n", + rbuf[6], rbuf[7], rbuf[8]); + pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n", + rbuf[9], rbuf[10], rbuf[11]); +#endif + mutex_lock(&akm->flags_lock); + /* Report magnetic sensor information */ + if (m_flag) { + input_report_abs(data->input_dev, ABS_RX, rbuf[0]); + input_report_abs(data->input_dev, ABS_RY, rbuf[1]); + input_report_abs(data->input_dev, ABS_RZ, rbuf[2]); + input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]); + } + + /* Report acceleration sensor information */ + if (a_flag) { + input_report_abs(data->input_dev, ABS_X, rbuf[6]); + input_report_abs(data->input_dev, ABS_Y, rbuf[7]); + input_report_abs(data->input_dev, ABS_Z, rbuf[8]); + input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]); + } + + /* Report temperature information */ + if (t_flag) + input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]); + + if (mv_flag) { + input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]); + input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]); + input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]); + } + mutex_unlock(&akm->flags_lock); + + input_sync(data->input_dev); +} + +static void akm8975_ecs_close_done(struct akm8975_data *akm) +{ + FUNCDBG("called"); + mutex_lock(&akm->flags_lock); + m_flag = 1; + a_flag = 1; + t_flag = 1; + mv_flag = 1; + mutex_unlock(&akm->flags_lock); +} + +static int akm_aot_open(struct inode *inode, struct 
file *file) +{ + int ret = -1; + + FUNCDBG("called"); + if (atomic_cmpxchg(&open_flag, 0, 1) == 0) { + wake_up(&open_wq); + ret = 0; + } + + ret = nonseekable_open(inode, file); + if (ret) + return ret; + + file->private_data = akmd_data; + + return ret; +} + +static int akm_aot_release(struct inode *inode, struct file *file) +{ + FUNCDBG("called"); + atomic_set(&open_flag, 0); + wake_up(&open_wq); + return 0; +} + +static int akm_aot_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *) arg; + short flag; + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + case ECS_IOCTL_APP_SET_AFLAG: + case ECS_IOCTL_APP_SET_MVFLAG: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + if (flag < 0 || flag > 1) + return -EINVAL; + break; + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&akm->flags_lock); + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + m_flag = flag; + break; + case ECS_IOCTL_APP_GET_MFLAG: + flag = m_flag; + break; + case ECS_IOCTL_APP_SET_AFLAG: + a_flag = flag; + break; + case ECS_IOCTL_APP_GET_AFLAG: + flag = a_flag; + break; + case ECS_IOCTL_APP_SET_MVFLAG: + mv_flag = flag; + break; + case ECS_IOCTL_APP_GET_MVFLAG: + flag = mv_flag; + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + return -ENOTTY; + } + mutex_unlock(&akm->flags_lock); + + switch (cmd) { + case ECS_IOCTL_APP_GET_MFLAG: + case ECS_IOCTL_APP_GET_AFLAG: + case ECS_IOCTL_APP_GET_MVFLAG: + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akmd_open(struct inode *inode, struct file *file) +{ + int err = 0; + + FUNCDBG("called"); + err = nonseekable_open(inode, file); + if (err) + return err; + + file->private_data = akmd_data; + return 0; +} + +static int akmd_release(struct inode *inode, struct file *file) +{ + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + akm8975_ecs_close_done(akm); + return 0; +} + +static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *) arg; + + char rwbuf[16]; + int ret = -1; + int status; + short value[12]; + short delay; + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + + switch (cmd) { + case ECS_IOCTL_READ: + case ECS_IOCTL_WRITE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + + case ECS_IOCTL_SET_YPR: + if (copy_from_user(&value, argp, sizeof(value))) + return -EFAULT; + break; + + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (rwbuf[0] < 1) + return -EINVAL; + + ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + + case ECS_IOCTL_WRITE: + if (rwbuf[0] < 2) + return -EINVAL; + + ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_YPR: + akm8975_ecs_report_value(akm, value); + break; + + case ECS_IOCTL_GET_OPEN_STATUS: + wait_event_interruptible(open_wq, + (atomic_read(&open_flag) != 0)); + status = atomic_read(&open_flag); + break; + case ECS_IOCTL_GET_CLOSE_STATUS: + wait_event_interruptible(open_wq, + (atomic_read(&open_flag) == 0)); + status = 
atomic_read(&open_flag); + break; + + case ECS_IOCTL_GET_DELAY: + delay = akmd_delay; + break; + + default: + FUNCDBG("Unknown cmd\n"); + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_GET_OPEN_STATUS: + case ECS_IOCTL_GET_CLOSE_STATUS: + if (copy_to_user(argp, &status, sizeof(status))) + return -EFAULT; + break; + case ECS_IOCTL_GET_DELAY: + if (copy_to_user(argp, &delay, sizeof(delay))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +/* needed to clear the int. pin */ +static void akm_work_func(struct work_struct *work) +{ + struct akm8975_data *akm = + container_of(work, struct akm8975_data, work); + + FUNCDBG("called"); + enable_irq(akm->this_client->irq); +} + +static irqreturn_t akm8975_interrupt(int irq, void *dev_id) +{ + struct akm8975_data *akm = dev_id; + FUNCDBG("called"); + + disable_irq_nosync(akm->this_client->irq); + schedule_work(&akm->work); + return IRQ_HANDLED; +} + +static int akm8975_power_off(struct akm8975_data *akm) +{ +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + if (akm->pdata->power_off) + akm->pdata->power_off(); + + return 0; +} + +static int akm8975_power_on(struct akm8975_data *akm) +{ + int err; + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + if (akm->pdata->power_on) { + err = akm->pdata->power_on(); + if (err < 0) + return err; + } + return 0; +} + +static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + /* TO DO: might need more work after power mgmt + is enabled */ + return akm8975_power_off(akm); +} + +static int akm8975_resume(struct i2c_client *client) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + /* TO DO: might need more work after power mgmt + is enabled */ + return akm8975_power_on(akm); +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void akm8975_early_suspend(struct early_suspend *handler) +{ + struct akm8975_data *akm; + akm = container_of(handler, struct akm8975_data, early_suspend); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + akm8975_suspend(akm->this_client, PMSG_SUSPEND); +} + +static void akm8975_early_resume(struct early_suspend *handler) +{ + struct akm8975_data *akm; + akm = container_of(handler, struct akm8975_data, early_suspend); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + akm8975_resume(akm->this_client); +} +#endif + + +static int akm8975_init_client(struct i2c_client *client) +{ + struct akm8975_data *data; + int ret; + + data = i2c_get_clientdata(client); + + ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING, + "akm8975", data); + + if (ret < 0) { + pr_err("akm8975_init_client: request irq failed\n"); + goto err; + } + + init_waitqueue_head(&open_wq); + + mutex_lock(&data->flags_lock); + m_flag = 1; + a_flag = 1; + t_flag = 1; + mv_flag = 1; + mutex_unlock(&data->flags_lock); + + return 0; +err: + return ret; +} + +static const struct file_operations akmd_fops = { + .owner = THIS_MODULE, + .open = akmd_open, + .release = akmd_release, + .ioctl = akmd_ioctl, +}; + +static const struct file_operations akm_aot_fops = { + .owner = THIS_MODULE, + .open = akm_aot_open, + .release = akm_aot_release, + .ioctl = akm_aot_ioctl, +}; + +static struct miscdevice akm_aot_device = { + .minor = 
MISC_DYNAMIC_MINOR, + .name = "akm8975_aot", + .fops = &akm_aot_fops, +}; + +static struct miscdevice akmd_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8975_dev", + .fops = &akmd_fops, +}; + +int akm8975_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct akm8975_data *akm; + int err; + FUNCDBG("called"); + + if (client->dev.platform_data == NULL) { + dev_err(&client->dev, "platform data is NULL. exiting.\n"); + err = -ENODEV; + goto exit_platform_data_null; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "platform data is NULL. exiting.\n"); + err = -ENODEV; + goto exit_check_functionality_failed; + } + + akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL); + if (!akm) { + dev_err(&client->dev, + "failed to allocate memory for module data\n"); + err = -ENOMEM; + goto exit_alloc_data_failed; + } + + akm->pdata = client->dev.platform_data; + + mutex_init(&akm->flags_lock); + INIT_WORK(&akm->work, akm_work_func); + i2c_set_clientdata(client, akm); + + err = akm8975_power_on(akm); + if (err < 0) + goto exit_power_on_failed; + + akm8975_init_client(client); + akm->this_client = client; + akmd_data = akm; + + akm->input_dev = input_allocate_device(); + if (!akm->input_dev) { + err = -ENOMEM; + dev_err(&akm->this_client->dev, + "input device allocate failed\n"); + goto exit_input_dev_alloc_failed; + } + + set_bit(EV_ABS, akm->input_dev->evbit); + + /* yaw */ + input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0); + /* pitch */ + input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0); + /* roll */ + input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0); + /* x-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0); + /* y-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0); + /* z-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0); + /* temparature */ + input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0); + /* status of magnetic sensor */ + input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0); + /* status of acceleration sensor */ + input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0); + /* x-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0); + /* y-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0); + /* z-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0); + + akm->input_dev->name = "compass"; + + err = input_register_device(akm->input_dev); + if (err) { + pr_err("akm8975_probe: Unable to register input device: %s\n", + akm->input_dev->name); + goto exit_input_register_device_failed; + } + + err = misc_register(&akmd_device); + if (err) { + pr_err("akm8975_probe: akmd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_aot_device); + if (err) { + pr_err("akm8975_probe: akm_aot_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = device_create_file(&client->dev, &dev_attr_akm_ms1); + +#ifdef CONFIG_HAS_EARLYSUSPEND + akm->early_suspend.suspend = akm8975_early_suspend; + akm->early_suspend.resume = akm8975_early_resume; + register_early_suspend(&akm->early_suspend); +#endif + return 0; + +exit_misc_device_register_failed: +exit_input_register_device_failed: + input_free_device(akm->input_dev); +exit_input_dev_alloc_failed: 
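+	/* probe failed after the sensor was powered on: power it back
+	 * off before releasing the driver context below */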
+ akm8975_power_off(akm); +exit_power_on_failed: + kfree(akm); +exit_alloc_data_failed: +exit_check_functionality_failed: +exit_platform_data_null: + return err; +} + +static int __devexit akm8975_remove(struct i2c_client *client) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + FUNCDBG("called"); + free_irq(client->irq, NULL); + input_unregister_device(akm->input_dev); + misc_deregister(&akmd_device); + misc_deregister(&akm_aot_device); + akm8975_power_off(akm); + kfree(akm); + return 0; +} + +static const struct i2c_device_id akm8975_id[] = { + { "akm8975", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, akm8975_id); + +static struct i2c_driver akm8975_driver = { + .probe = akm8975_probe, + .remove = akm8975_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .resume = akm8975_resume, + .suspend = akm8975_suspend, +#endif + .id_table = akm8975_id, + .driver = { + .name = "akm8975", + }, +}; + +static int __init akm8975_init(void) +{ + pr_info("AK8975 compass driver: init\n"); + FUNCDBG("AK8975 compass driver: init\n"); + return i2c_add_driver(&akm8975_driver); +} + +static void __exit akm8975_exit(void) +{ + FUNCDBG("AK8975 compass driver: exit\n"); + i2c_del_driver(&akm8975_driver); +} + +module_init(akm8975_init); +module_exit(akm8975_exit); + +MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>"); +MODULE_DESCRIPTION("AK8975 compass driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/apanic.c b/drivers/misc/apanic.c new file mode 100644 index 000000000000..ca875f89da7a --- /dev/null +++ b/drivers/misc/apanic.c @@ -0,0 +1,606 @@ +/* drivers/misc/apanic.c + * + * Copyright (C) 2009 Google, Inc. + * Author: San Mehat <san@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/wakelock.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/mtd/mtd.h> +#include <linux/notifier.h> +#include <linux/mtd/mtd.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/preempt.h> + +extern void ram_console_enable_console(int); + +struct panic_header { + u32 magic; +#define PANIC_MAGIC 0xdeadf00d + + u32 version; +#define PHDR_VERSION 0x01 + + u32 console_offset; + u32 console_length; + + u32 threads_offset; + u32 threads_length; +}; + +struct apanic_data { + struct mtd_info *mtd; + struct panic_header curr; + void *bounce; + struct proc_dir_entry *apanic_console; + struct proc_dir_entry *apanic_threads; +}; + +static struct apanic_data drv_ctx; +static struct work_struct proc_removal_work; +static DEFINE_MUTEX(drv_mutex); + +static unsigned int *apanic_bbt; +static unsigned int apanic_erase_blocks; +static unsigned int apanic_good_blocks; + +static void set_bb(unsigned int block, unsigned int *bbt) +{ + unsigned int flag = 1; + + BUG_ON(block >= apanic_erase_blocks); + + flag = flag << (block%32); + apanic_bbt[block/32] |= flag; + apanic_good_blocks--; +} + +static unsigned int get_bb(unsigned int block, unsigned int *bbt) +{ + unsigned int flag; + + BUG_ON(block >= apanic_erase_blocks); + + flag = 1 << (block%32); + return apanic_bbt[block/32] & flag; +} + +static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt) +{ + int bbt_size; + apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift); + bbt_size = (apanic_erase_blocks+32)/32; + + apanic_bbt = kmalloc(bbt_size*4, GFP_KERNEL); + memset(apanic_bbt, 0, bbt_size*4); + apanic_good_blocks = apanic_erase_blocks; +} +static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt) +{ + int i; + + for (i = 0; i < apanic_erase_blocks; i++) { + if (mtd->block_isbad(mtd, i*mtd->erasesize)) + set_bb(i, apanic_bbt); + } +} + +#define APANIC_INVALID_OFFSET 0xFFFFFFFF + +static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset) +{ + unsigned int logic_block = offset>>(mtd->erasesize_shift); + unsigned int phy_block; + unsigned good_block = 0; + + for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) { + if (!get_bb(phy_block, apanic_bbt)) + good_block++; + if (good_block == (logic_block + 1)) + break; + } + + if (good_block != (logic_block + 1)) + return APANIC_INVALID_OFFSET; + + return offset + ((phy_block-logic_block)<<mtd->erasesize_shift); +} + +static void apanic_erase_callback(struct erase_info *done) +{ + wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv; + wake_up(wait_q); +} + +static int apanic_proc_read(char *buffer, char **start, off_t offset, + int count, int *peof, void *dat) +{ + struct apanic_data *ctx = &drv_ctx; + size_t file_length; + off_t file_offset; + unsigned int page_no; + off_t page_offset; + int rc; + size_t len; + + if (!count) + return 0; + + mutex_lock(&drv_mutex); + + switch ((int) dat) { + case 1: /* apanic_console */ + file_length = ctx->curr.console_length; + file_offset = ctx->curr.console_offset; + break; + case 2: /* apanic_threads */ + file_length = ctx->curr.threads_length; + file_offset = 
ctx->curr.threads_offset; + break; + default: + pr_err("Bad dat (%d)\n", (int) dat); + mutex_unlock(&drv_mutex); + return -EINVAL; + } + + if ((offset + count) > file_length) { + mutex_unlock(&drv_mutex); + return 0; + } + + /* We only support reading a maximum of a flash page */ + if (count > ctx->mtd->writesize) + count = ctx->mtd->writesize; + + page_no = (file_offset + offset) / ctx->mtd->writesize; + page_offset = (file_offset + offset) % ctx->mtd->writesize; + + + if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)) + == APANIC_INVALID_OFFSET) { + pr_err("apanic: reading an invalid address\n"); + mutex_unlock(&drv_mutex); + return -EINVAL; + } + rc = ctx->mtd->read(ctx->mtd, + phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)), + ctx->mtd->writesize, + &len, ctx->bounce); + + if (page_offset) + count -= page_offset; + memcpy(buffer, ctx->bounce + page_offset, count); + + *start = count; + + if ((offset + count) == file_length) + *peof = 1; + + mutex_unlock(&drv_mutex); + return count; +} + +static void mtd_panic_erase(void) +{ + struct apanic_data *ctx = &drv_ctx; + struct erase_info erase; + DECLARE_WAITQUEUE(wait, current); + wait_queue_head_t wait_q; + int rc, i; + + init_waitqueue_head(&wait_q); + erase.mtd = ctx->mtd; + erase.callback = apanic_erase_callback; + erase.len = ctx->mtd->erasesize; + erase.priv = (u_long)&wait_q; + for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) { + erase.addr = i; + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&wait_q, &wait); + + if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) { + printk(KERN_WARNING + "apanic: Skipping erase of bad " + "block @%llx\n", erase.addr); + set_current_state(TASK_RUNNING); + remove_wait_queue(&wait_q, &wait); + continue; + } + + rc = ctx->mtd->erase(ctx->mtd, &erase); + if (rc) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&wait_q, &wait); + printk(KERN_ERR + "apanic: Erase of 0x%llx, 0x%llx failed\n", + (unsigned long long) erase.addr, + (unsigned long long) erase.len); + if (rc == -EIO) { + if (ctx->mtd->block_markbad(ctx->mtd, + erase.addr)) { + printk(KERN_ERR + "apanic: Err marking blk bad\n"); + goto out; + } + printk(KERN_INFO + "apanic: Marked a bad block" + " @%llx\n", erase.addr); + set_bb(erase.addr>>ctx->mtd->erasesize_shift, + apanic_bbt); + continue; + } + goto out; + } + schedule(); + remove_wait_queue(&wait_q, &wait); + } + printk(KERN_DEBUG "apanic: %s partition erased\n", + CONFIG_APANIC_PLABEL); +out: + return; +} + +static void apanic_remove_proc_work(struct work_struct *work) +{ + struct apanic_data *ctx = &drv_ctx; + + mutex_lock(&drv_mutex); + mtd_panic_erase(); + memset(&ctx->curr, 0, sizeof(struct panic_header)); + if (ctx->apanic_console) { + remove_proc_entry("apanic_console", NULL); + ctx->apanic_console = NULL; + } + if (ctx->apanic_threads) { + remove_proc_entry("apanic_threads", NULL); + ctx->apanic_threads = NULL; + } + mutex_unlock(&drv_mutex); +} + +static int apanic_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + schedule_work(&proc_removal_work); + return count; +} + +static void mtd_panic_notify_add(struct mtd_info *mtd) +{ + struct apanic_data *ctx = &drv_ctx; + struct panic_header *hdr = ctx->bounce; + size_t len; + int rc; + int proc_entry_created = 0; + + if (strcmp(mtd->name, CONFIG_APANIC_PLABEL)) + return; + + ctx->mtd = mtd; + + alloc_bbt(mtd, apanic_bbt); + scan_bbt(mtd, apanic_bbt); + + if (apanic_good_blocks == 0) { + printk(KERN_ERR "apanic: no any good blocks?!\n"); + 
goto out_err; + } + + rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize, + &len, ctx->bounce); + if (rc && rc == -EBADMSG) { + printk(KERN_WARNING + "apanic: Bad ECC on block 0 (ignored)\n"); + } else if (rc && rc != -EUCLEAN) { + printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc); + goto out_err; + } + + if (len != mtd->writesize) { + printk(KERN_ERR "apanic: Bad read size (%d)\n", rc); + goto out_err; + } + + printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name); + + if (hdr->magic != PANIC_MAGIC) { + printk(KERN_INFO "apanic: No panic data available\n"); + mtd_panic_erase(); + return; + } + + if (hdr->version != PHDR_VERSION) { + printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n", + hdr->version, PHDR_VERSION); + mtd_panic_erase(); + return; + } + + memcpy(&ctx->curr, hdr, sizeof(struct panic_header)); + + printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u)\n", + hdr->console_offset, hdr->console_length, + hdr->threads_offset, hdr->threads_length); + + if (hdr->console_length) { + ctx->apanic_console = create_proc_entry("apanic_console", + S_IFREG | S_IRUGO, NULL); + if (!ctx->apanic_console) + printk(KERN_ERR "%s: failed creating procfile\n", + __func__); + else { + ctx->apanic_console->read_proc = apanic_proc_read; + ctx->apanic_console->write_proc = apanic_proc_write; + ctx->apanic_console->size = hdr->console_length; + ctx->apanic_console->data = (void *) 1; + proc_entry_created = 1; + } + } + + if (hdr->threads_length) { + ctx->apanic_threads = create_proc_entry("apanic_threads", + S_IFREG | S_IRUGO, NULL); + if (!ctx->apanic_threads) + printk(KERN_ERR "%s: failed creating procfile\n", + __func__); + else { + ctx->apanic_threads->read_proc = apanic_proc_read; + ctx->apanic_threads->write_proc = apanic_proc_write; + ctx->apanic_threads->size = hdr->threads_length; + ctx->apanic_threads->data = (void *) 2; + proc_entry_created = 1; + } + } + + if (!proc_entry_created) + mtd_panic_erase(); + + return; +out_err: + ctx->mtd = NULL; +} + +static void mtd_panic_notify_remove(struct mtd_info *mtd) +{ + struct apanic_data *ctx = &drv_ctx; + if (mtd == ctx->mtd) { + ctx->mtd = NULL; + printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name); + } +} + +static struct mtd_notifier mtd_panic_notifier = { + .add = mtd_panic_notify_add, + .remove = mtd_panic_notify_remove, +}; + +static int in_panic = 0; + +static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to, + const u_char *buf) +{ + int rc; + size_t wlen; + int panic = in_interrupt() | in_atomic(); + + if (panic && !mtd->panic_write) { + printk(KERN_EMERG "%s: No panic_write available\n", __func__); + return 0; + } else if (!panic && !mtd->write) { + printk(KERN_EMERG "%s: No write available\n", __func__); + return 0; + } + + to = phy_offset(mtd, to); + if (to == APANIC_INVALID_OFFSET) { + printk(KERN_EMERG "apanic: write to invalid address\n"); + return 0; + } + + if (panic) + rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf); + else + rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf); + + if (rc) { + printk(KERN_EMERG + "%s: Error writing data to flash (%d)\n", + __func__, rc); + return rc; + } + + return wlen; +} + +extern int log_buf_copy(char *dest, int idx, int len); +extern void log_buf_clear(void); + +/* + * Writes the contents of the console to the specified offset in flash. 
+ * Returns number of bytes written + */ +static int apanic_write_console(struct mtd_info *mtd, unsigned int off) +{ + struct apanic_data *ctx = &drv_ctx; + int saved_oip; + int idx = 0; + int rc, rc2; + unsigned int last_chunk = 0; + + while (!last_chunk) { + saved_oip = oops_in_progress; + oops_in_progress = 1; + rc = log_buf_copy(ctx->bounce, idx, mtd->writesize); + if (rc < 0) + break; + + if (rc != mtd->writesize) + last_chunk = rc; + + oops_in_progress = saved_oip; + if (rc <= 0) + break; + if (rc != mtd->writesize) + memset(ctx->bounce + rc, 0, mtd->writesize - rc); + + rc2 = apanic_writeflashpage(mtd, off, ctx->bounce); + if (rc2 <= 0) { + printk(KERN_EMERG + "apanic: Flash write failed (%d)\n", rc2); + return idx; + } + if (!last_chunk) + idx += rc2; + else + idx += last_chunk; + off += rc2; + } + return idx; +} + +static int apanic(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct apanic_data *ctx = &drv_ctx; + struct panic_header *hdr = (struct panic_header *) ctx->bounce; + int console_offset = 0; + int console_len = 0; + int threads_offset = 0; + int threads_len = 0; + int rc; + + if (in_panic) + return NOTIFY_DONE; + in_panic = 1; +#ifdef CONFIG_PREEMPT + /* Ensure that cond_resched() won't try to preempt anybody */ + add_preempt_count(PREEMPT_ACTIVE); +#endif + touch_softlockup_watchdog(); + + if (!ctx->mtd) + goto out; + + if (ctx->curr.magic) { + printk(KERN_EMERG "Crash partition in use!\n"); + goto out; + } + console_offset = ctx->mtd->writesize; + + /* + * Write out the console + */ + console_len = apanic_write_console(ctx->mtd, console_offset); + if (console_len < 0) { + printk(KERN_EMERG "Error writing console to panic log! (%d)\n", + console_len); + console_len = 0; + } + + /* + * Write out all threads + */ + threads_offset = ALIGN(console_offset + console_len, + ctx->mtd->writesize); + if (!threads_offset) + threads_offset = ctx->mtd->writesize; + + ram_console_enable_console(0); + + log_buf_clear(); + show_state_filter(0); + threads_len = apanic_write_console(ctx->mtd, threads_offset); + if (threads_len < 0) { + printk(KERN_EMERG "Error writing threads to panic log! 
(%d)\n", + threads_len); + threads_len = 0; + } + + /* + * Finally write the panic header + */ + memset(ctx->bounce, 0, PAGE_SIZE); + hdr->magic = PANIC_MAGIC; + hdr->version = PHDR_VERSION; + + hdr->console_offset = console_offset; + hdr->console_length = console_len; + + hdr->threads_offset = threads_offset; + hdr->threads_length = threads_len; + + rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce); + if (rc <= 0) { + printk(KERN_EMERG "apanic: Header write failed (%d)\n", + rc); + goto out; + } + + printk(KERN_EMERG "apanic: Panic dump sucessfully written to flash\n"); + + out: +#ifdef CONFIG_PREEMPT + sub_preempt_count(PREEMPT_ACTIVE); +#endif + in_panic = 0; + return NOTIFY_DONE; +} + +static struct notifier_block panic_blk = { + .notifier_call = apanic, +}; + +static int panic_dbg_get(void *data, u64 *val) +{ + apanic(NULL, 0, NULL); + return 0; +} + +static int panic_dbg_set(void *data, u64 val) +{ + BUG(); + return -1; +} + +DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n"); + +int __init apanic_init(void) +{ + register_mtd_user(&mtd_panic_notifier); + atomic_notifier_chain_register(&panic_notifier_list, &panic_blk); + debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops); + memset(&drv_ctx, 0, sizeof(drv_ctx)); + drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL); + INIT_WORK(&proc_removal_work, apanic_remove_proc_work); + printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n", + CONFIG_APANIC_PLABEL); + return 0; +} + +module_init(apanic_init); diff --git a/drivers/misc/kernel_debugger.c b/drivers/misc/kernel_debugger.c new file mode 100644 index 000000000000..20e1abbfbec7 --- /dev/null +++ b/drivers/misc/kernel_debugger.c @@ -0,0 +1,79 @@ +/* drivers/android/kernel_debugger.c + * + * Guts of the kernel debugger. + * Needs something to actually push commands to it. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/sysrq.h> +#include <linux/kernel_debugger.h> + +#define dprintf(fmt...) (ctxt->printf(ctxt->cookie, fmt)) + +static void do_ps(struct kdbg_ctxt *ctxt) +{ + struct task_struct *g, *p; + unsigned state; + static const char stat_nam[] = "RSDTtZX"; + + dprintf("pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + state = p->state ? __ffs(p->state) + 1 : 0; + dprintf("%5d %5d %4d ", p->pid, p->parent->pid, p->prio); + dprintf("%-13.13s %c", p->comm, + state >= sizeof(stat_nam) ? '?' 
: stat_nam[state]); + if (state == TASK_RUNNING) + dprintf(" running\n"); + else + dprintf(" %08lx\n", thread_saved_pc(p)); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +int log_buf_copy(char *dest, int idx, int len); +extern int do_syslog(int type, char __user *bug, int count); +static void do_sysrq(struct kdbg_ctxt *ctxt, char rq) +{ + char buf[128]; + int ret; + int idx = 0; + do_syslog(5 /* clear */, NULL, 0); + handle_sysrq(rq); + while (1) { + ret = log_buf_copy(buf, idx, sizeof(buf) - 1); + if (ret <= 0) + break; + buf[ret] = 0; + dprintf("%s", buf); + idx += ret; + } +} + +int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd) +{ + if (!strcmp(cmd, "ps")) + do_ps(ctxt); + if (!strcmp(cmd, "sysrq")) + do_sysrq(ctxt, 'h'); + if (!strncmp(cmd, "sysrq ", 6)) + do_sysrq(ctxt, cmd[6]); + + return 0; +} + diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c new file mode 100644 index 000000000000..890831e2deb7 --- /dev/null +++ b/drivers/misc/pmem.c @@ -0,0 +1,1344 @@ +/* drivers/android/pmem.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/miscdevice.h> +#include <linux/platform_device.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/mm.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/android_pmem.h> +#include <linux/mempolicy.h> +#include <linux/sched.h> +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/cacheflush.h> + +#define PMEM_MAX_DEVICES 10 +#define PMEM_MAX_ORDER 128 +#define PMEM_MIN_ALLOC PAGE_SIZE + +#define PMEM_DEBUG 1 + +/* indicates that a refernce to this file has been taken via get_pmem_file, + * the file should not be released until put_pmem_file is called */ +#define PMEM_FLAGS_BUSY 0x1 +/* indicates that this is a suballocation of a larger master range */ +#define PMEM_FLAGS_CONNECTED 0x1 << 1 +/* indicates this is a master and not a sub allocation and that it is mmaped */ +#define PMEM_FLAGS_MASTERMAP 0x1 << 2 +/* submap and unsubmap flags indicate: + * 00: subregion has never been mmaped + * 10: subregion has been mmaped, reference to the mm was taken + * 11: subretion has ben released, refernece to the mm still held + * 01: subretion has been released, reference to the mm has been released + */ +#define PMEM_FLAGS_SUBMAP 0x1 << 3 +#define PMEM_FLAGS_UNSUBMAP 0x1 << 4 + + +struct pmem_data { + /* in alloc mode: an index into the bitmap + * in no_alloc mode: the size of the allocation */ + int index; + /* see flags above for descriptions */ + unsigned int flags; + /* protects this data field, if the mm_mmap sem will be held at the + * same time as this sem, the mm sem must be taken first (as this is + * the order for vma_open and vma_close ops */ + struct rw_semaphore sem; + /* info about the mmaping process */ + struct vm_area_struct *vma; + /* task struct of the mapping process */ + struct task_struct *task; + /* process id of teh mapping process */ + pid_t pid; + /* file descriptor of the master */ + int master_fd; + /* file struct of the master */ + struct file *master_file; + /* a list of currently available 
regions if this is a suballocation */ + struct list_head region_list; + /* a linked list of data so we can access them for debugging */ + struct list_head list; +#if PMEM_DEBUG + int ref; +#endif +}; + +struct pmem_bits { + unsigned allocated:1; /* 1 if allocated, 0 if free */ + unsigned order:7; /* size of the region in pmem space */ +}; + +struct pmem_region_node { + struct pmem_region region; + struct list_head list; +}; + +#define PMEM_DEBUG_MSGS 0 +#if PMEM_DEBUG_MSGS +#define DLOG(fmt,args...) \ + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ + ##args); } \ + while (0) +#else +#define DLOG(x...) do {} while (0) +#endif + +struct pmem_info { + struct miscdevice dev; + /* physical start address of the remaped pmem space */ + unsigned long base; + /* vitual start address of the remaped pmem space */ + unsigned char __iomem *vbase; + /* total size of the pmem space */ + unsigned long size; + /* number of entries in the pmem space */ + unsigned long num_entries; + /* pfn of the garbage page in memory */ + unsigned long garbage_pfn; + /* index of the garbage page in the pmem space */ + int garbage_index; + /* the bitmap for the region indicating which entries are allocated + * and which are free */ + struct pmem_bits *bitmap; + /* indicates the region should not be managed with an allocator */ + unsigned no_allocator; + /* indicates maps of this region should be cached, if a mix of + * cached and uncached is desired, set this and open the device with + * O_SYNC to get an uncached region */ + unsigned cached; + unsigned buffered; + /* in no_allocator mode the first mapper gets the whole space and sets + * this flag */ + unsigned allocated; + /* for debugging, creates a list of pmem file structs, the + * data_list_sem should be taken before pmem_data->sem if both are + * needed */ + struct semaphore data_list_sem; + struct list_head data_list; + /* pmem_sem protects the bitmap array + * a write lock should be held when modifying entries in bitmap + * a read lock should be held when reading data from bits or + * dereferencing a pointer into bitmap + * + * pmem_data->sem protects the pmem data of a particular file + * Many of the function that require the pmem_data->sem have a non- + * locking version for when the caller is already holding that sem. 
+ * + * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: + * down(pmem_data->sem) => down(bitmap_sem) + */ + struct rw_semaphore bitmap_sem; + + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*release)(struct inode *, struct file *); +}; + +static struct pmem_info pmem[PMEM_MAX_DEVICES]; +static int id_count; + +#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) +#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order +#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) +#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) +#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) +#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) +#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) +#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ + PMEM_LEN(id, index)) +#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) +#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ + PMEM_LEN(id, index)) +#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) +#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) +#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ + (!(data->flags & PMEM_FLAGS_UNSUBMAP))) + +static int pmem_release(struct inode *, struct file *); +static int pmem_mmap(struct file *, struct vm_area_struct *); +static int pmem_open(struct inode *, struct file *); +static long pmem_ioctl(struct file *, unsigned int, unsigned long); + +struct file_operations pmem_fops = { + .release = pmem_release, + .mmap = pmem_mmap, + .open = pmem_open, + .unlocked_ioctl = pmem_ioctl, +}; + +static int get_id(struct file *file) +{ + return MINOR(file->f_dentry->d_inode->i_rdev); +} + +int is_pmem_file(struct file *file) +{ + int id; + + if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) + return 0; + id = get_id(file); + if (unlikely(id >= PMEM_MAX_DEVICES)) + return 0; + if (unlikely(file->f_dentry->d_inode->i_rdev != + MKDEV(MISC_MAJOR, pmem[id].dev.minor))) + return 0; + return 1; +} + +static int has_allocation(struct file *file) +{ + struct pmem_data *data; + /* check is_pmem_file first if not accessed via pmem_file_ops */ + + if (unlikely(!file->private_data)) + return 0; + data = (struct pmem_data *)file->private_data; + if (unlikely(data->index < 0)) + return 0; + return 1; +} + +static int is_master_owner(struct file *file) +{ + struct file *master_file; + struct pmem_data *data; + int put_needed, ret = 0; + + if (!is_pmem_file(file) || !has_allocation(file)) + return 0; + data = (struct pmem_data *)file->private_data; + if (PMEM_FLAGS_MASTERMAP & data->flags) + return 1; + master_file = fget_light(data->master_fd, &put_needed); + if (master_file && data->master_file == master_file) + ret = 1; + fput_light(master_file, put_needed); + return ret; +} + +static int pmem_free(int id, int index) +{ + /* caller should hold the write lock on pmem_sem! 
*/ + int buddy, curr = index; + DLOG("index %d\n", index); + + if (pmem[id].no_allocator) { + pmem[id].allocated = 0; + return 0; + } + /* clean up the bitmap, merging any buddies */ + pmem[id].bitmap[curr].allocated = 0; + /* find a slots buddy Buddy# = Slot# ^ (1 << order) + * if the buddy is also free merge them + * repeat until the buddy is not free or end of the bitmap is reached + */ + do { + buddy = PMEM_BUDDY_INDEX(id, curr); + if (PMEM_IS_FREE(id, buddy) && + PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { + PMEM_ORDER(id, buddy)++; + PMEM_ORDER(id, curr)++; + curr = min(buddy, curr); + } else { + break; + } + } while (curr < pmem[id].num_entries); + + return 0; +} + +static void pmem_revoke(struct file *file, struct pmem_data *data); + +static int pmem_release(struct inode *inode, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_region_node *region_node; + struct list_head *elt, *elt2; + int id = get_id(file), ret = 0; + + + down(&pmem[id].data_list_sem); + /* if this file is a master, revoke all the memory in the connected + * files */ + if (PMEM_FLAGS_MASTERMAP & data->flags) { + struct pmem_data *sub_data; + list_for_each(elt, &pmem[id].data_list) { + sub_data = list_entry(elt, struct pmem_data, list); + down_read(&sub_data->sem); + if (PMEM_IS_SUBMAP(sub_data) && + file == sub_data->master_file) { + up_read(&sub_data->sem); + pmem_revoke(file, sub_data); + } else + up_read(&sub_data->sem); + } + } + list_del(&data->list); + up(&pmem[id].data_list_sem); + + + down_write(&data->sem); + + /* if its not a conencted file and it has an allocation, free it */ + if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { + down_write(&pmem[id].bitmap_sem); + ret = pmem_free(id, data->index); + up_write(&pmem[id].bitmap_sem); + } + + /* if this file is a submap (mapped, connected file), downref the + * task struct */ + if (PMEM_FLAGS_SUBMAP & data->flags) + if (data->task) { + put_task_struct(data->task); + data->task = NULL; + } + + file->private_data = NULL; + + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, list); + list_del(elt); + kfree(region_node); + } + BUG_ON(!list_empty(&data->region_list)); + + up_write(&data->sem); + kfree(data); + if (pmem[id].release) + ret = pmem[id].release(inode, file); + + return ret; +} + +static int pmem_open(struct inode *inode, struct file *file) +{ + struct pmem_data *data; + int id = get_id(file); + int ret = 0; + + DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); + /* setup file->private_data to indicate its unmapped */ + /* you can only open a pmem device one time */ + if (file->private_data != NULL) + return -1; + data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); + if (!data) { + printk("pmem: unable to allocate memory for pmem metadata."); + return -1; + } + data->flags = 0; + data->index = -1; + data->task = NULL; + data->vma = NULL; + data->pid = 0; + data->master_file = NULL; +#if PMEM_DEBUG + data->ref = 0; +#endif + INIT_LIST_HEAD(&data->region_list); + init_rwsem(&data->sem); + + file->private_data = data; + INIT_LIST_HEAD(&data->list); + + down(&pmem[id].data_list_sem); + list_add(&data->list, &pmem[id].data_list); + up(&pmem[id].data_list_sem); + return ret; +} + +static unsigned long pmem_order(unsigned long len) +{ + int i; + + len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; + len--; + for (i = 0; i < sizeof(len)*8; i++) + if (len >> i == 0) + break; + return i; +} + +static int 
pmem_allocate(int id, unsigned long len) +{ + /* caller should hold the write lock on pmem_sem! */ + /* return the corresponding pdata[] entry */ + int curr = 0; + int end = pmem[id].num_entries; + int best_fit = -1; + unsigned long order = pmem_order(len); + + if (pmem[id].no_allocator) { + DLOG("no allocator"); + if ((len > pmem[id].size) || pmem[id].allocated) + return -1; + pmem[id].allocated = 1; + return len; + } + + if (order > PMEM_MAX_ORDER) + return -1; + DLOG("order %lx\n", order); + + /* look through the bitmap: + * if you find a free slot of the correct order use it + * otherwise, use the best fit (smallest with size > order) slot + */ + while (curr < end) { + if (PMEM_IS_FREE(id, curr)) { + if (PMEM_ORDER(id, curr) == (unsigned char)order) { + /* set the not free bit and clear others */ + best_fit = curr; + break; + } + if (PMEM_ORDER(id, curr) > (unsigned char)order && + (best_fit < 0 || + PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) + best_fit = curr; + } + curr = PMEM_NEXT_INDEX(id, curr); + } + + /* if best_fit < 0, there are no suitable slots, + * return an error + */ + if (best_fit < 0) { + printk("pmem: no space left to allocate!\n"); + return -1; + } + + /* now partition the best fit: + * split the slot into 2 buddies of order - 1 + * repeat until the slot is of the correct order + */ + while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { + int buddy; + PMEM_ORDER(id, best_fit) -= 1; + buddy = PMEM_BUDDY_INDEX(id, best_fit); + PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); + } + pmem[id].bitmap[best_fit].allocated = 1; + return best_fit; +} + +static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot) +{ + int id = get_id(file); +#ifdef pgprot_noncached + if (pmem[id].cached == 0 || file->f_flags & O_SYNC) + return pgprot_noncached(vma_prot); +#endif +#ifdef pgprot_ext_buffered + else if (pmem[id].buffered) + return pgprot_ext_buffered(vma_prot); +#endif + return vma_prot; +} + +static unsigned long pmem_start_addr(int id, struct pmem_data *data) +{ + if (pmem[id].no_allocator) + return PMEM_START_ADDR(id, 0); + else + return PMEM_START_ADDR(id, data->index); + +} + +static void *pmem_start_vaddr(int id, struct pmem_data *data) +{ + return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; +} + +static unsigned long pmem_len(int id, struct pmem_data *data) +{ + if (pmem[id].no_allocator) + return data->index; + else + return PMEM_LEN(id, data->index); +} + +static int pmem_map_garbage(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + int i, garbage_pages = len >> PAGE_SHIFT; + + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; + for (i = 0; i < garbage_pages; i++) { + if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), + pmem[id].garbage_pfn)) + return -EAGAIN; + } + return 0; +} + +static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + int garbage_pages; + DLOG("unmap offset %lx len %lx\n", offset, len); + + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); + + garbage_pages = len >> PAGE_SHIFT; + zap_page_range(vma, vma->vm_start + offset, len, NULL); + pmem_map_garbage(id, vma, data, offset, len); + return 0; +} + +static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + DLOG("map offset %lx len %lx\n", offset, len); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); + 
BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); + + if (io_remap_pfn_range(vma, vma->vm_start + offset, + (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, + len, vma->vm_page_prot)) { + return -EAGAIN; + } + return 0; +} + +static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + /* hold the mm semp for the vma you are modifying when you call this */ + BUG_ON(!vma); + zap_page_range(vma, vma->vm_start + offset, len, NULL); + return pmem_map_pfn_range(id, vma, data, offset, len); +} + +static void pmem_vma_open(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct pmem_data *data = file->private_data; + int id = get_id(file); + /* this should never be called as we don't support copying pmem + * ranges via fork */ + BUG_ON(!has_allocation(file)); + down_write(&data->sem); + /* remap the garbage pages, forkers don't get access to the data */ + pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); + up_write(&data->sem); +} + +static void pmem_vma_close(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct pmem_data *data = file->private_data; + + DLOG("current %u ppid %u file %p count %d\n", current->pid, + current->parent->pid, file, file_count(file)); + if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { + printk(KERN_WARNING "pmem: something is very wrong, you are " + "closing a vm backing an allocation that doesn't " + "exist!\n"); + return; + } + down_write(&data->sem); + if (data->vma == vma) { + data->vma = NULL; + if ((data->flags & PMEM_FLAGS_CONNECTED) && + (data->flags & PMEM_FLAGS_SUBMAP)) + data->flags |= PMEM_FLAGS_UNSUBMAP; + } + /* the kernel is going to free this vma now anyway */ + up_write(&data->sem); +} + +static struct vm_operations_struct vm_ops = { + .open = pmem_vma_open, + .close = pmem_vma_close, +}; + +static int pmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct pmem_data *data; + int index; + unsigned long vma_size = vma->vm_end - vma->vm_start; + int ret = 0, id = get_id(file); + + if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { +#if PMEM_DEBUG + printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" + " and a multiple of pages_size.\n"); +#endif + return -EINVAL; + } + + data = (struct pmem_data *)file->private_data; + down_write(&data->sem); + /* check this file isn't already mmaped, for submaps check this file + * has never been mmaped */ + if ((data->flags & PMEM_FLAGS_SUBMAP) || + (data->flags & PMEM_FLAGS_UNSUBMAP)) { +#if PMEM_DEBUG + printk(KERN_ERR "pmem: you can only mmap a pmem file once, " + "this file is already mmaped. 
%x\n", data->flags); +#endif + ret = -EINVAL; + goto error; + } + /* if file->private_data == unalloced, alloc*/ + if (data && data->index == -1) { + down_write(&pmem[id].bitmap_sem); + index = pmem_allocate(id, vma->vm_end - vma->vm_start); + up_write(&pmem[id].bitmap_sem); + data->index = index; + } + /* either no space was available or an error occured */ + if (!has_allocation(file)) { + ret = -EINVAL; + printk("pmem: could not find allocation for map.\n"); + goto error; + } + + if (pmem_len(id, data) < vma_size) { +#if PMEM_DEBUG + printk(KERN_WARNING "pmem: mmap size [%lu] does not match" + "size of backing region [%lu].\n", vma_size, + pmem_len(id, data)); +#endif + ret = -EINVAL; + goto error; + } + + vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); + + if (data->flags & PMEM_FLAGS_CONNECTED) { + struct pmem_region_node *region_node; + struct list_head *elt; + if (pmem_map_garbage(id, vma, data, 0, vma_size)) { + printk("pmem: mmap failed in kernel!\n"); + ret = -EAGAIN; + goto error; + } + list_for_each(elt, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + DLOG("remapping file: %p %lx %lx\n", file, + region_node->region.offset, + region_node->region.len); + if (pmem_remap_pfn_range(id, vma, data, + region_node->region.offset, + region_node->region.len)) { + ret = -EAGAIN; + goto error; + } + } + data->flags |= PMEM_FLAGS_SUBMAP; + get_task_struct(current->group_leader); + data->task = current->group_leader; + data->vma = vma; +#if PMEM_DEBUG + data->pid = current->pid; +#endif + DLOG("submmapped file %p vma %p pid %u\n", file, vma, + current->pid); + } else { + if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { + printk(KERN_INFO "pmem: mmap failed in kernel!\n"); + ret = -EAGAIN; + goto error; + } + data->flags |= PMEM_FLAGS_MASTERMAP; + data->pid = current->pid; + } + vma->vm_ops = &vm_ops; +error: + up_write(&data->sem); + return ret; +} + +/* the following are the api for accessing pmem regions by other drivers + * from inside the kernel */ +int get_pmem_user_addr(struct file *file, unsigned long *start, + unsigned long *len) +{ + struct pmem_data *data; + if (!is_pmem_file(file) || !has_allocation(file)) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: requested pmem data from invalid" + "file.\n"); +#endif + return -1; + } + data = (struct pmem_data *)file->private_data; + down_read(&data->sem); + if (data->vma) { + *start = data->vma->vm_start; + *len = data->vma->vm_end - data->vma->vm_start; + } else { + *start = 0; + *len = 0; + } + up_read(&data->sem); + return 0; +} + +int get_pmem_addr(struct file *file, unsigned long *start, + unsigned long *vstart, unsigned long *len) +{ + struct pmem_data *data; + int id; + + if (!is_pmem_file(file) || !has_allocation(file)) { + return -1; + } + + data = (struct pmem_data *)file->private_data; + if (data->index == -1) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: requested pmem data from file with no " + "allocation.\n"); + return -1; +#endif + } + id = get_id(file); + + down_read(&data->sem); + *start = pmem_start_addr(id, data); + *len = pmem_len(id, data); + *vstart = (unsigned long)pmem_start_vaddr(id, data); + up_read(&data->sem); +#if PMEM_DEBUG + down_write(&data->sem); + data->ref++; + up_write(&data->sem); +#endif + return 0; +} + +int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, + unsigned long *len, struct file **filp) +{ + struct file *file; + + file = fget(fd); + if (unlikely(file == 
NULL)) { + printk(KERN_INFO "pmem: requested data from file descriptor " + "that doesn't exist."); + return -1; + } + + if (get_pmem_addr(file, start, vstart, len)) + goto end; + + if (filp) + *filp = file; + return 0; +end: + fput(file); + return -1; +} + +void put_pmem_file(struct file *file) +{ + struct pmem_data *data; + int id; + + if (!is_pmem_file(file)) + return; + id = get_id(file); + data = (struct pmem_data *)file->private_data; +#if PMEM_DEBUG + down_write(&data->sem); + if (data->ref == 0) { + printk("pmem: pmem_put > pmem_get %s (pid %d)\n", + pmem[id].dev.name, data->pid); + BUG(); + } + data->ref--; + up_write(&data->sem); +#endif + fput(file); +} + +void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) +{ + struct pmem_data *data; + int id; + void *vaddr; + struct pmem_region_node *region_node; + struct list_head *elt; + void *flush_start, *flush_end; + + if (!is_pmem_file(file) || !has_allocation(file)) { + return; + } + + id = get_id(file); + data = (struct pmem_data *)file->private_data; + if (!pmem[id].cached || file->f_flags & O_SYNC) + return; + + down_read(&data->sem); + vaddr = pmem_start_vaddr(id, data); + /* if this isn't a submmapped file, flush the whole thing */ + if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { + dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); + goto end; + } + /* otherwise, flush the region of the file we are drawing */ + list_for_each(elt, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, list); + if ((offset >= region_node->region.offset) && + ((offset + len) <= (region_node->region.offset + + region_node->region.len))) { + flush_start = vaddr + region_node->region.offset; + flush_end = flush_start + region_node->region.len; + dmac_flush_range(flush_start, flush_end); + break; + } + } +end: + up_read(&data->sem); +} + +static int pmem_connect(unsigned long connect, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_data *src_data; + struct file *src_file; + int ret = 0, put_needed; + + down_write(&data->sem); + /* retrieve the src file and check it is a pmem file with an alloc */ + src_file = fget_light(connect, &put_needed); + DLOG("connect %p to %p\n", file, src_file); + if (!src_file) { + printk("pmem: src file not found!\n"); + ret = -EINVAL; + goto err_no_file; + } + if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { + printk(KERN_INFO "pmem: src file is not a pmem file or has no " + "alloc!\n"); + ret = -EINVAL; + goto err_bad_file; + } + src_data = (struct pmem_data *)src_file->private_data; + + if (has_allocation(file) && (data->index != src_data->index)) { + printk("pmem: file is already mapped but doesn't match this" + " src_file!\n"); + ret = -EINVAL; + goto err_bad_file; + } + data->index = src_data->index; + data->flags |= PMEM_FLAGS_CONNECTED; + data->master_fd = connect; + data->master_file = src_file; + +err_bad_file: + fput_light(src_file, put_needed); +err_no_file: + up_write(&data->sem); + return ret; +} + +static void pmem_unlock_data_and_mm(struct pmem_data *data, + struct mm_struct *mm) +{ + up_write(&data->sem); + if (mm != NULL) { + up_write(&mm->mmap_sem); + mmput(mm); + } +} + +static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, + struct mm_struct **locked_mm) +{ + int ret = 0; + struct mm_struct *mm = NULL; + *locked_mm = NULL; +lock_mm: + down_read(&data->sem); + if (PMEM_IS_SUBMAP(data)) { + mm = get_task_mm(data->task); + if (!mm) { +#if PMEM_DEBUG + 
printk("pmem: can't remap task is gone!\n"); +#endif + up_read(&data->sem); + return -1; + } + } + up_read(&data->sem); + + if (mm) + down_write(&mm->mmap_sem); + + down_write(&data->sem); + /* check that the file didn't get mmaped before we could take the + * data sem, this should be safe b/c you can only submap each file + * once */ + if (PMEM_IS_SUBMAP(data) && !mm) { + pmem_unlock_data_and_mm(data, mm); + up_write(&data->sem); + goto lock_mm; + } + /* now check that vma.mm is still there, it could have been + * deleted by vma_close before we could get the data->sem */ + if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { + /* might as well release this */ + if (data->flags & PMEM_FLAGS_SUBMAP) { + put_task_struct(data->task); + data->task = NULL; + /* lower the submap flag to show the mm is gone */ + data->flags &= ~(PMEM_FLAGS_SUBMAP); + } + pmem_unlock_data_and_mm(data, mm); + return -1; + } + *locked_mm = mm; + return ret; +} + +int pmem_remap(struct pmem_region *region, struct file *file, + unsigned operation) +{ + int ret; + struct pmem_region_node *region_node; + struct mm_struct *mm = NULL; + struct list_head *elt, *elt2; + int id = get_id(file); + struct pmem_data *data = (struct pmem_data *)file->private_data; + + /* pmem region must be aligned on a page boundry */ + if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || + !PMEM_IS_PAGE_ALIGNED(region->len))) { +#if PMEM_DEBUG + printk("pmem: request for unaligned pmem suballocation " + "%lx %lx\n", region->offset, region->len); +#endif + return -EINVAL; + } + + /* if userspace requests a region of len 0, there's nothing to do */ + if (region->len == 0) + return 0; + + /* lock the mm and data */ + ret = pmem_lock_data_and_mm(file, data, &mm); + if (ret) + return 0; + + /* only the owner of the master file can remap the client fds + * that back in it */ + if (!is_master_owner(file)) { +#if PMEM_DEBUG + printk("pmem: remap requested from non-master process\n"); +#endif + ret = -EINVAL; + goto err; + } + + /* check that the requested range is within the src allocation */ + if (unlikely((region->offset > pmem_len(id, data)) || + (region->len > pmem_len(id, data)) || + (region->offset + region->len > pmem_len(id, data)))) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); +#endif + ret = -EINVAL; + goto err; + } + + if (operation == PMEM_MAP) { + region_node = kmalloc(sizeof(struct pmem_region_node), + GFP_KERNEL); + if (!region_node) { + ret = -ENOMEM; +#if PMEM_DEBUG + printk(KERN_INFO "No space to allocate metadata!"); +#endif + goto err; + } + region_node->region = *region; + list_add(®ion_node->list, &data->region_list); + } else if (operation == PMEM_UNMAP) { + int found = 0; + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + if (region->len == 0 || + (region_node->region.offset == region->offset && + region_node->region.len == region->len)) { + list_del(elt); + kfree(region_node); + found = 1; + } + } + if (!found) { +#if PMEM_DEBUG + printk("pmem: Unmap region does not map any mapped " + "region!"); +#endif + ret = -EINVAL; + goto err; + } + } + + if (data->vma && PMEM_IS_SUBMAP(data)) { + if (operation == PMEM_MAP) + ret = pmem_remap_pfn_range(id, data->vma, data, + region->offset, region->len); + else if (operation == PMEM_UNMAP) + ret = pmem_unmap_pfn_range(id, data->vma, data, + region->offset, region->len); + } + +err: + pmem_unlock_data_and_mm(data, mm); + return ret; +} + +static void pmem_revoke(struct file 
*file, struct pmem_data *data) +{ + struct pmem_region_node *region_node; + struct list_head *elt, *elt2; + struct mm_struct *mm = NULL; + int id = get_id(file); + int ret = 0; + + data->master_file = NULL; + ret = pmem_lock_data_and_mm(file, data, &mm); + /* if lock_data_and_mm fails either the task that mapped the fd, or + * the vma that mapped it have already gone away, nothing more + * needs to be done */ + if (ret) + return; + /* unmap everything */ + /* delete the regions and region list nothing is mapped any more */ + if (data->vma) + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + pmem_unmap_pfn_range(id, data->vma, data, + region_node->region.offset, + region_node->region.len); + list_del(elt); + kfree(region_node); + } + /* delete the master file */ + pmem_unlock_data_and_mm(data, mm); +} + +static void pmem_get_size(struct pmem_region *region, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + int id = get_id(file); + + if (!has_allocation(file)) { + region->offset = 0; + region->len = 0; + return; + } else { + region->offset = pmem_start_addr(id, data); + region->len = pmem_len(id, data); + } + DLOG("offset %lx len %lx\n", region->offset, region->len); +} + + +static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pmem_data *data; + int id = get_id(file); + + switch (cmd) { + case PMEM_GET_PHYS: + { + struct pmem_region region; + DLOG("get_phys\n"); + if (!has_allocation(file)) { + region.offset = 0; + region.len = 0; + } else { + data = (struct pmem_data *)file->private_data; + region.offset = pmem_start_addr(id, data); + region.len = pmem_len(id, data); + } + printk(KERN_INFO "pmem: request for physical address of pmem region " + "from process %d.\n", current->pid); + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_MAP: + { + struct pmem_region region; + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + data = (struct pmem_data *)file->private_data; + return pmem_remap(®ion, file, PMEM_MAP); + } + break; + case PMEM_UNMAP: + { + struct pmem_region region; + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + data = (struct pmem_data *)file->private_data; + return pmem_remap(®ion, file, PMEM_UNMAP); + break; + } + case PMEM_GET_SIZE: + { + struct pmem_region region; + DLOG("get_size\n"); + pmem_get_size(®ion, file); + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_GET_TOTAL_SIZE: + { + struct pmem_region region; + DLOG("get total size\n"); + region.offset = 0; + get_id(file); + region.len = pmem[id].size; + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_ALLOCATE: + { + if (has_allocation(file)) + return -EINVAL; + data = (struct pmem_data *)file->private_data; + data->index = pmem_allocate(id, arg); + break; + } + case PMEM_CONNECT: + DLOG("connect\n"); + return pmem_connect(arg, file); + break; + case PMEM_CACHE_FLUSH: + { + struct pmem_region region; + DLOG("flush\n"); + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + flush_pmem_file(file, region.offset, region.len); + break; + } + default: + if (pmem[id].ioctl) + return pmem[id].ioctl(file, cmd, arg); + return -EINVAL; + } + return 0; +} 
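For illustration, a minimal user-space client of the ioctl interface above might look like the sketch below. The /dev/pmem node name and the <linux/android_pmem.h> header are assumptions for the example, not something this patch defines; PMEM_GET_PHYS and struct pmem_region (offset, len) are the pieces handled by pmem_ioctl() above.

/*
 * Illustrative user-space client of the pmem ioctl interface.
 * The device node name and the <linux/android_pmem.h> header are assumptions.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android_pmem.h> /* assumed to provide PMEM_GET_PHYS and struct pmem_region */

int main(void)
{
        struct pmem_region region;
        size_t len = 1 << 20;   /* must be page aligned; mapping offset must be 0 */
        void *va;
        int fd;

        fd = open("/dev/pmem", O_RDWR); /* node name depends on platform data */
        if (fd < 0)
                return 1;

        /* mmap() performs the allocation for a master (non-connected) fd */
        va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (va == MAP_FAILED) {
                close(fd);
                return 1;
        }

        /* with an allocation in place, the driver reports its physical base and length */
        if (ioctl(fd, PMEM_GET_PHYS, &region) == 0)
                printf("pmem: phys 0x%lx len 0x%lx\n", region.offset, region.len);

        munmap(va, len);
        close(fd);
        return 0;
}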
+ +#if PMEM_DEBUG +static ssize_t debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t debug_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct list_head *elt, *elt2; + struct pmem_data *data; + struct pmem_region_node *region_node; + int id = (int)file->private_data; + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + + DLOG("debug open\n"); + n = scnprintf(buffer, debug_bufmax, + "pid #: mapped regions (offset, len) (offset,len)...\n"); + + down(&pmem[id].data_list_sem); + list_for_each(elt, &pmem[id].data_list) { + data = list_entry(elt, struct pmem_data, list); + down_read(&data->sem); + n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", + data->pid); + list_for_each(elt2, &data->region_list) { + region_node = list_entry(elt2, struct pmem_region_node, + list); + n += scnprintf(buffer + n, debug_bufmax - n, + "(%lx,%lx) ", + region_node->region.offset, + region_node->region.len); + } + n += scnprintf(buffer + n, debug_bufmax - n, "\n"); + up_read(&data->sem); + } + up(&pmem[id].data_list_sem); + + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static struct file_operations debug_fops = { + .read = debug_read, + .open = debug_open, +}; +#endif + +#if 0 +static struct miscdevice pmem_dev = { + .name = "pmem", + .fops = &pmem_fops, +}; +#endif + +int pmem_setup(struct android_pmem_platform_data *pdata, + long (*ioctl)(struct file *, unsigned int, unsigned long), + int (*release)(struct inode *, struct file *)) +{ + int err = 0; + int i, index = 0; + int id = id_count; + id_count++; + + pmem[id].no_allocator = pdata->no_allocator; + pmem[id].cached = pdata->cached; + pmem[id].buffered = pdata->buffered; + pmem[id].base = pdata->start; + pmem[id].size = pdata->size; + pmem[id].ioctl = ioctl; + pmem[id].release = release; + init_rwsem(&pmem[id].bitmap_sem); + init_MUTEX(&pmem[id].data_list_sem); + INIT_LIST_HEAD(&pmem[id].data_list); + pmem[id].dev.name = pdata->name; + pmem[id].dev.minor = id; + pmem[id].dev.fops = &pmem_fops; + printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); + + err = misc_register(&pmem[id].dev); + if (err) { + printk(KERN_ALERT "Unable to register pmem driver!\n"); + goto err_cant_register_device; + } + pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; + + pmem[id].bitmap = kmalloc(pmem[id].num_entries * + sizeof(struct pmem_bits), GFP_KERNEL); + if (!pmem[id].bitmap) + goto err_no_mem_for_metadata; + + memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * + pmem[id].num_entries); + + for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { + if ((pmem[id].num_entries) & 1<<i) { + PMEM_ORDER(id, index) = i; + index = PMEM_NEXT_INDEX(id, index); + } + } + + if (pmem[id].cached) + pmem[id].vbase = ioremap_cached(pmem[id].base, + pmem[id].size); +#ifdef ioremap_ext_buffered + else if (pmem[id].buffered) + pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, + pmem[id].size); +#endif + else + pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); + + if (pmem[id].vbase == 0) + goto error_cant_remap; + + pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); + if (pmem[id].no_allocator) + pmem[id].allocated = 0; + +#if PMEM_DEBUG + debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, + &debug_fops); +#endif + return 0; +error_cant_remap: + kfree(pmem[id].bitmap); +err_no_mem_for_metadata: + misc_deregister(&pmem[id].dev); +err_cant_register_device: + 
return -1; +} + +static int pmem_probe(struct platform_device *pdev) +{ + struct android_pmem_platform_data *pdata; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Unable to probe pmem!\n"); + return -1; + } + pdata = pdev->dev.platform_data; + return pmem_setup(pdata, NULL, NULL); +} + + +static int pmem_remove(struct platform_device *pdev) +{ + int id = pdev->id; + __free_page(pfn_to_page(pmem[id].garbage_pfn)); + misc_deregister(&pmem[id].dev); + return 0; +} + +static struct platform_driver pmem_driver = { + .probe = pmem_probe, + .remove = pmem_remove, + .driver = { .name = "android_pmem" } +}; + + +static int __init pmem_init(void) +{ + return platform_driver_register(&pmem_driver); +} + +static void __exit pmem_exit(void) +{ + platform_driver_unregister(&pmem_driver); +} + +module_init(pmem_init); +module_exit(pmem_exit); + diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c new file mode 100644 index 000000000000..2141124a6c12 --- /dev/null +++ b/drivers/misc/uid_stat.c @@ -0,0 +1,156 @@ +/* drivers/misc/uid_stat.c + * + * Copyright (C) 2008 - 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/atomic.h> + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/stat.h> +#include <linux/uid_stat.h> +#include <net/activity_stats.h> + +static DEFINE_SPINLOCK(uid_lock); +static LIST_HEAD(uid_list); +static struct proc_dir_entry *parent; + +struct uid_stat { + struct list_head link; + uid_t uid; + atomic_t tcp_rcv; + atomic_t tcp_snd; +}; + +static struct uid_stat *find_uid_stat(uid_t uid) { + unsigned long flags; + struct uid_stat *entry; + + spin_lock_irqsave(&uid_lock, flags); + list_for_each_entry(entry, &uid_list, link) { + if (entry->uid == uid) { + spin_unlock_irqrestore(&uid_lock, flags); + return entry; + } + } + spin_unlock_irqrestore(&uid_lock, flags); + return NULL; +} + +static int tcp_snd_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + unsigned int bytes; + char *p = page; + struct uid_stat *uid_entry = (struct uid_stat *) data; + if (!data) + return 0; + + bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN); + p += sprintf(p, "%u\n", bytes); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int tcp_rcv_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + unsigned int bytes; + char *p = page; + struct uid_stat *uid_entry = (struct uid_stat *) data; + if (!data) + return 0; + + bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN); + p += sprintf(p, "%u\n", bytes); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +/* Create a new entry for tracking the specified uid. 
*/ +static struct uid_stat *create_stat(uid_t uid) { + unsigned long flags; + char uid_s[32]; + struct uid_stat *new_uid; + struct proc_dir_entry *entry; + + /* Create the uid stat struct and append it to the list. */ + if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL) + return NULL; + + new_uid->uid = uid; + /* Counters start at INT_MIN, so we can track 4GB of network traffic. */ + atomic_set(&new_uid->tcp_rcv, INT_MIN); + atomic_set(&new_uid->tcp_snd, INT_MIN); + + spin_lock_irqsave(&uid_lock, flags); + list_add_tail(&new_uid->link, &uid_list); + spin_unlock_irqrestore(&uid_lock, flags); + + sprintf(uid_s, "%d", uid); + entry = proc_mkdir(uid_s, parent); + + /* Keep reference to uid_stat so we know what uid to read stats from. */ + create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc, + (void *) new_uid); + + create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc, + (void *) new_uid); + + return new_uid; +} + +int uid_stat_tcp_snd(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = find_uid_stat(uid)) == NULL && + ((entry = create_stat(uid)) == NULL)) { + return -1; + } + atomic_add(size, &entry->tcp_snd); + return 0; +} + +int uid_stat_tcp_rcv(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = find_uid_stat(uid)) == NULL && + ((entry = create_stat(uid)) == NULL)) { + return -1; + } + atomic_add(size, &entry->tcp_rcv); + return 0; +} + +static int __init uid_stat_init(void) +{ + parent = proc_mkdir("uid_stat", NULL); + if (!parent) { + pr_err("uid_stat: failed to create proc entry\n"); + return -1; + } + return 0; +} + +__initcall(uid_stat_init); diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c new file mode 100644 index 000000000000..f5b95152948b --- /dev/null +++ b/drivers/misc/wl127x-rfkill.c @@ -0,0 +1,121 @@ +/* + * Bluetooth TI wl127x rfkill power control via GPIO + * + * Copyright (C) 2009 Motorola, Inc. + * Copyright (C) 2008 Texas Instruments + * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/gpio.h> +#include <linux/rfkill.h> +#include <linux/platform_device.h> +#include <linux/wl127x-rfkill.h> + +static int wl127x_rfkill_set_power(void *data, enum rfkill_state state) +{ + int nshutdown_gpio = (int) data; + + switch (state) { + case RFKILL_STATE_UNBLOCKED: + gpio_set_value(nshutdown_gpio, 1); + break; + case RFKILL_STATE_SOFT_BLOCKED: + gpio_set_value(nshutdown_gpio, 0); + break; + default: + printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state); + } + return 0; +} + +static int wl127x_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data; + enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */ + + rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio"); + if (unlikely(rc)) + return rc; + + rc = gpio_direction_output(pdata->nshutdown_gpio, 0); + if (unlikely(rc)) + return rc; + + rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state); + wl127x_rfkill_set_power(NULL, default_state); + + pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH); + if (unlikely(!pdata->rfkill)) + return -ENOMEM; + + pdata->rfkill->name = "wl127x"; + pdata->rfkill->state = default_state; + /* userspace cannot take exclusive control */ + pdata->rfkill->user_claim_unsupported = 1; + pdata->rfkill->user_claim = 0; + pdata->rfkill->data = (void *) pdata->nshutdown_gpio; + pdata->rfkill->toggle_radio = wl127x_rfkill_set_power; + + rc = rfkill_register(pdata->rfkill); + + if (unlikely(rc)) + rfkill_free(pdata->rfkill); + + return rc; +} + +static int wl127x_rfkill_remove(struct platform_device *pdev) +{ + struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data; + + rfkill_unregister(pdata->rfkill); + rfkill_free(pdata->rfkill); + gpio_free(pdata->nshutdown_gpio); + + return 0; +} + +static struct platform_driver wl127x_rfkill_platform_driver = { + .probe = wl127x_rfkill_probe, + .remove = wl127x_rfkill_remove, + .driver = { + .name = "wl127x-rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init wl127x_rfkill_init(void) +{ + return platform_driver_register(&wl127x_rfkill_platform_driver); +} + +static void __exit wl127x_rfkill_exit(void) +{ + platform_driver_unregister(&wl127x_rfkill_platform_driver); +} + +module_init(wl127x_rfkill_init); +module_exit(wl127x_rfkill_exit); + +MODULE_ALIAS("platform:wl127x"); +MODULE_DESCRIPTION("wl127x-rfkill"); +MODULE_AUTHOR("Motorola"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 3f2a912659af..86948f90c3ff 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig @@ -32,6 +32,15 @@ config MMC_BLOCK_BOUNCE If unsure, say Y here. +config MMC_BLOCK_DEFERRED_RESUME + bool "Defer MMC layer resume until I/O is requested" + depends on MMC_BLOCK + default n + help + Say Y here to enable deferred MMC resume until I/O + is requested. This will reduce overall resume latency and + save power when there's an SD card inserted but not being used. 
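For orientation, this option is consumed in the block driver: before a request is issued, the queue handler checks whether the bus was left suspended and resumes it on demand (the real change is in the block.c hunk further below). A minimal sketch of that gate, wrapped in a hypothetical helper and assuming the mmc_bus_needs_resume()/mmc_resume_bus() helpers this patch adds to the core:

/* Sketch only: mirrors the gate added to mmc_blk_issue_rq() below. */
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
static void mmc_blk_deferred_resume(struct mmc_queue *mq)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        /* the bus was left suspended; bring it up only now that I/O has arrived */
        if (mmc_bus_needs_resume(card->host)) {
                mmc_resume_bus(card->host);
                mmc_blk_set_blksize(md, card);
        }
}
#endif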
+ config SDIO_UART tristate "SDIO UART/GPS class support" help diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index d545f79f6000..7054fd5863a0 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -87,11 +87,7 @@ static void mmc_blk_put(struct mmc_blk_data *md) mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { - int devmaj = MAJOR(disk_devt(md->disk)); - int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; - - if (!devmaj) - devidx = md->disk->first_minor >> MMC_SHIFT; + int devidx = md->disk->first_minor >> MMC_SHIFT; blk_cleanup_queue(md->queue.queue); @@ -426,6 +422,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) continue; } status = get_card_status(card, req); + } else if (disable_multi == 1) { + disable_multi = 0; } if (brq.cmd.error) { @@ -544,8 +542,21 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) return 0; } +static int +mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card); + static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + + if (mmc_bus_needs_resume(card->host)) { + mmc_resume_bus(card->host); + mmc_blk_set_blksize(md, card); + } +#endif + if (req->cmd_flags & REQ_DISCARD) { if (req->cmd_flags & REQ_SECURE) return mmc_blk_issue_secdiscard_rq(mq, req); @@ -607,6 +618,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = &card->dev; + md->disk->flags = GENHD_FL_EXT_DEVT; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: @@ -667,7 +679,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) if (err) { printk(KERN_ERR "%s: unable to set block size to %d: %d\n", - md->disk->disk_name, cmd.arg, err); + md->disk->disk_name, cmd.arg, err); return -EINVAL; } @@ -702,6 +714,9 @@ static int mmc_blk_probe(struct mmc_card *card) cap_str, md->read_only ? "(ro)" : ""); mmc_set_drvdata(card, md); +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME + mmc_set_bus_resume_policy(card->host, 1); +#endif add_disk(md->disk); return 0; @@ -726,6 +741,9 @@ static void mmc_blk_remove(struct mmc_card *card) mmc_blk_put(md); } mmc_set_drvdata(card, NULL); +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME + mmc_set_bus_resume_policy(card->host, 0); +#endif } #ifdef CONFIG_PM @@ -744,7 +762,9 @@ static int mmc_blk_resume(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { +#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_blk_set_blksize(md, card); +#endif mmc_queue_resume(&md->queue); } return 0; diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index bb22ffd76ef8..0eba6658233a 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -16,3 +16,20 @@ config MMC_UNSAFE_RESUME This option sets a default which can be overridden by the module parameter "removable=0" or "removable=1". + +config MMC_EMBEDDED_SDIO + boolean "MMC embedded SDIO device support (EXPERIMENTAL)" + depends on EXPERIMENTAL + help + If you say Y here, support will be added for embedded SDIO + devices which do not contain the necessary enumeration + support in hardware to be properly detected. + +config MMC_PARANOID_SD_INIT + bool "Enable paranoid SD card initialization (EXPERIMENTAL)" + depends on EXPERIMENTAL + help + If you say Y here, the MMC layer will be extra paranoid + about re-trying SD init requests. 
This can be a useful + work-around for buggy controllers and hardware. Enable + if you are experiencing issues with SD detection. diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5db49b124ffa..708f33f20cb7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -22,6 +22,7 @@ #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/regulator/consumer.h> +#include <linux/wakelock.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -38,6 +39,7 @@ #include "sdio_ops.h" static struct workqueue_struct *workqueue; +static struct wake_lock mmc_delayed_work_wake_lock; /* * Enabling software CRCs on the data blocks can be a significant (30%) @@ -69,6 +71,7 @@ MODULE_PARM_DESC( static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { + wake_lock(&mmc_delayed_work_wake_lock); return queue_delayed_work(workqueue, work, delay); } @@ -545,9 +548,12 @@ void mmc_host_deeper_disable(struct work_struct *work) /* If the host is claimed then we do not want to disable it anymore */ if (!mmc_try_claim_host(host)) - return; + goto out; mmc_host_do_disable(host, 1); mmc_do_release_host(host); + +out: + wake_unlock(&mmc_delayed_work_wake_lock); } /** @@ -907,12 +913,7 @@ static void mmc_power_up(struct mmc_host *host) */ mmc_delay(10); - if (host->f_min > 400000) { - pr_warning("%s: Minimum clock frequency too high for " - "identification mode\n", mmc_hostname(host)); - host->ios.clock = host->f_min; - } else - host->ios.clock = 400000; + host->ios.clock = host->f_min; host->ios.power_mode = MMC_POWER_ON; mmc_set_ios(host); @@ -977,6 +978,30 @@ static inline void mmc_bus_put(struct mmc_host *host) spin_unlock_irqrestore(&host->lock, flags); } +int mmc_resume_bus(struct mmc_host *host) +{ + if (!mmc_bus_needs_resume(host)) + return -EINVAL; + + printk("%s: Starting deferred resume\n", mmc_hostname(host)); + host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME; + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + mmc_power_up(host); + BUG_ON(!host->bus_ops->resume); + host->bus_ops->resume(host); + } + + if (host->bus_ops->detect && !host->bus_dead) + host->bus_ops->detect(host); + + mmc_bus_put(host); + printk("%s: Deferred resume completed\n", mmc_hostname(host)); + return 0; +} + +EXPORT_SYMBOL(mmc_resume_bus); + /* * Assign a mmc bus handler to a host. Only one bus handler may control a * host at any given time. 
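The core.c changes above and in the mmc_rescan() hunk that follows use a recurring pattern: take a suspend-blocking wakelock when the detect work is queued, then either drop it when the work item finishes or hold it a little longer so user space can react to a card change. Below is a condensed, self-contained sketch of that pattern using the same <linux/wakelock.h> API as the patch; all names prefixed with example_ are placeholders, not part of this patch.

/* Condensed sketch of the wakelock-around-delayed-work pattern used above. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/wakelock.h>

static struct wake_lock example_wake_lock;
static struct delayed_work example_detect_work;

static void example_detect(struct work_struct *work)
{
        int need_userspace = 0;

        /* ... card-detect / resume work goes here ... */

        if (need_userspace)
                /* keep the system awake a little longer so user space can react */
                wake_lock_timeout(&example_wake_lock, HZ / 2);
        else
                wake_unlock(&example_wake_lock);
}

static int example_schedule_detect(unsigned long delay)
{
        /* block suspend until the queued work item has had a chance to run */
        wake_lock(&example_wake_lock);
        return schedule_delayed_work(&example_detect_work, delay);
}

static int __init example_init(void)
{
        wake_lock_init(&example_wake_lock, WAKE_LOCK_SUSPEND, "example_detect");
        INIT_DELAYED_WORK(&example_detect_work, example_detect);
        return 0;
}
module_init(example_init);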
@@ -1404,6 +1429,7 @@ void mmc_rescan(struct work_struct *work) u32 ocr; int err; unsigned long flags; + int extend_wakelock = 0; spin_lock_irqsave(&host->lock, flags); @@ -1421,6 +1447,12 @@ void mmc_rescan(struct work_struct *work) if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) host->bus_ops->detect(host); + /* If the card was removed the bus will be marked + * as dead - extend the wakelock so userspace + * can respond */ + if (host->bus_dead) + extend_wakelock = 1; + mmc_bus_put(host); @@ -1464,6 +1496,7 @@ void mmc_rescan(struct work_struct *work) if (mmc_attach_sd(host, ocr)) mmc_power_off(host); + extend_wakelock = 1; } goto out; } @@ -1475,6 +1508,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); + extend_wakelock = 1; goto out; } @@ -1485,6 +1519,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); + extend_wakelock = 1; goto out; } @@ -1493,6 +1528,11 @@ out_fail: mmc_power_off(host); out: + if (extend_wakelock) + wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); + else + wake_unlock(&mmc_delayed_work_wake_lock); + if (host->caps & MMC_CAP_NEEDS_POLL) mmc_schedule_delayed_work(&host->detect, HZ); } @@ -1622,6 +1662,9 @@ int mmc_suspend_host(struct mmc_host *host) { int err = 0; + if (mmc_bus_needs_resume(host)) + return 0; + if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); cancel_delayed_work(&host->detect); @@ -1651,6 +1694,12 @@ int mmc_resume_host(struct mmc_host *host) int err = 0; mmc_bus_get(host); + if (host->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME) { + host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME; + mmc_bus_put(host); + return 0; + } + if (host->bus_ops && !host->bus_dead) { if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { mmc_power_up(host); @@ -1719,10 +1768,28 @@ int mmc_pm_notify(struct notifier_block *notify_block, } #endif +#ifdef CONFIG_MMC_EMBEDDED_SDIO +void mmc_set_embedded_sdio_data(struct mmc_host *host, + struct sdio_cis *cis, + struct sdio_cccr *cccr, + struct sdio_embedded_func *funcs, + int num_funcs) +{ + host->embedded_sdio_data.cis = cis; + host->embedded_sdio_data.cccr = cccr; + host->embedded_sdio_data.funcs = funcs; + host->embedded_sdio_data.num_funcs = num_funcs; +} + +EXPORT_SYMBOL(mmc_set_embedded_sdio_data); +#endif + static int __init mmc_init(void) { int ret; + wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, "mmc_delayed_work"); + workqueue = create_singlethread_workqueue("kmmcd"); if (!workqueue) return -ENOMEM; @@ -1757,6 +1824,7 @@ static void __exit mmc_exit(void) mmc_unregister_host_class(); mmc_unregister_bus(); destroy_workqueue(workqueue); + wake_lock_destroy(&mmc_delayed_work_wake_lock); } subsys_initcall(mmc_init); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index d80cfdc8edd2..ba684e6d2b6b 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -137,7 +137,8 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - register_pm_notifier(&host->pm_notify); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + register_pm_notifier(&host->pm_notify); return 0; } @@ -154,7 +155,9 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - unregister_pm_notifier(&host->pm_notify); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + unregister_pm_notifier(&host->pm_notify); + mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/sd.c 
b/drivers/mmc/core/sd.c index 0f5241085557..7ab8fdc61c77 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -455,6 +455,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, bool reinit) { int err; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif if (!reinit) { /* @@ -481,7 +484,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* * Fetch switch information from card. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + for (retries = 1; retries <= 3; retries++) { + err = mmc_read_switch(card); + if (!err) { + if (retries > 1) { + printk(KERN_WARNING + "%s: recovered\n", + mmc_hostname(host)); + } + break; + } else { + printk(KERN_WARNING + "%s: read switch failed (attempt %d)\n", + mmc_hostname(host), retries); + } + } +#else err = mmc_read_switch(card); +#endif + if (err) return err; } @@ -662,18 +684,36 @@ static void mmc_sd_remove(struct mmc_host *host) */ static void mmc_sd_detect(struct mmc_host *host) { - int err; + int err = 0; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries = 5; +#endif BUG_ON(!host); BUG_ON(!host->card); - + mmc_claim_host(host); /* * Just check if our card has been removed. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + while(retries) { + err = mmc_send_status(host->card, NULL); + if (err) { + retries--; + udelay(5); + continue; + } + break; + } + if (!retries) { + printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n", + __func__, mmc_hostname(host), err); + } +#else err = mmc_send_status(host->card, NULL); - +#endif mmc_release_host(host); if (err) { @@ -711,12 +751,31 @@ static int mmc_sd_suspend(struct mmc_host *host) static int mmc_sd_resume(struct mmc_host *host) { int err; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, host->ocr, host->card); + + if (err) { + printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n", + mmc_hostname(host), err, retries); + mdelay(5); + retries--; + continue; + } + break; + } +#else err = mmc_sd_init_card(host, host->ocr, host->card); +#endif mmc_release_host(host); return err; @@ -763,6 +822,9 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) int mmc_attach_sd(struct mmc_host *host, u32 ocr) { int err; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif BUG_ON(!host); WARN_ON(!host->claimed); @@ -811,9 +873,27 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr) /* * Detect and init the card. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, host->ocr, NULL); + if (err) { + retries--; + continue; + } + break; + } + + if (!retries) { + printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n", + mmc_hostname(host), err); + goto err; + } +#else err = mmc_sd_init_card(host, host->ocr, NULL); if (err) goto err; +#endif mmc_release_host(host); diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 6b8db0465370..155efb1ac797 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -25,6 +25,10 @@ #include "sdio_ops.h" #include "sdio_cis.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include <linux/mmc/sdio_ids.h> +#endif + static int sdio_read_fbr(struct sdio_func *func) { int ret; @@ -436,19 +440,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto finish; } - /* - * Read the common registers. 
- */ - err = sdio_read_cccr(card); - if (err) - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cccr) + memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr)); + else { +#endif + /* + * Read the common registers. + */ + err = sdio_read_cccr(card); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif - /* - * Read the common CIS tuples. - */ - err = sdio_read_common_cis(card); - if (err) - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cis) + memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis)); + else { +#endif + /* + * Read the common CIS tuples. + */ + err = sdio_read_common_cis(card); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif if (oldcard) { int same = (card->cis.vendor == oldcard->cis.vendor && @@ -707,13 +727,36 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) funcs = (ocr & 0x70000000) >> 28; card->sdio_funcs = 0; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) + card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs; +#endif + /* * Initialize (but don't add) all present functions. */ for (i = 0; i < funcs; i++, card->sdio_funcs++) { - err = sdio_init_func(host->card, i + 1); - if (err) - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) { + struct sdio_func *tmp; + + tmp = sdio_alloc_func(host->card); + if (IS_ERR(tmp)) + goto remove; + tmp->num = (i + 1); + card->sdio_func[i] = tmp; + tmp->class = host->embedded_sdio_data.funcs[i].f_class; + tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize; + tmp->vendor = card->cis.vendor; + tmp->device = card->cis.device; + } else { +#endif + err = sdio_init_func(host->card, i + 1); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif } mmc_release_host(host); @@ -755,3 +798,77 @@ err: return err; } +int sdio_reset_comm(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u32 ocr; + int err; + + printk("%s():\n", __func__); + mmc_claim_host(host); + + mmc_go_idle(host); + + mmc_set_clock(host, host->f_min); + + err = mmc_send_io_op_cond(host, 0, &ocr); + if (err) + goto err; + + host->ocr = mmc_select_voltage(host, ocr); + if (!host->ocr) { + err = -EINVAL; + goto err; + } + + err = mmc_send_io_op_cond(host, host->ocr, &ocr); + if (err) + goto err; + + if (mmc_host_is_spi(host)) { + err = mmc_spi_set_crc(host, use_spi_crc); + if (err) + goto err; + } + + if (!mmc_host_is_spi(host)) { + err = mmc_send_relative_addr(host, &card->rca); + if (err) + goto err; + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); + } + if (!mmc_host_is_spi(host)) { + err = mmc_select_card(card); + if (err) + goto err; + } + + /* + * Switch to high-speed (if supported). + */ + err = sdio_enable_hs(card); + if (err > 0) + mmc_sd_go_highspeed(card); + else if (err) + goto err; + + /* + * Change to the card's maximum speed. 
+ */ + mmc_set_clock(host, mmc_sdio_get_max_clock(card)); + + err = sdio_enable_4bit_bus(card); + if (err > 0) + mmc_set_bus_width(host, MMC_BUS_WIDTH_4); + else if (err) + goto err; + + mmc_release_host(host); + return 0; +err: + printk("%s: Error resetting SDIO communications (%d)\n", + mmc_hostname(host), err); + mmc_release_host(host); + return err; +} +EXPORT_SYMBOL(sdio_reset_comm); diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 4a890dcb95ab..f59f92554482 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -21,6 +21,10 @@ #include "sdio_cis.h" #include "sdio_bus.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include <linux/mmc/host.h> +#endif + /* show configuration fields */ #define sdio_config_attr(field, format_string) \ static ssize_t \ @@ -200,7 +204,14 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); +#ifdef CONFIG_MMC_EMBEDDED_SDIO + /* + * If this device is embedded then we never allocated + * cis tables for this func + */ + if (!func->card->host->embedded_sdio_data.funcs) +#endif + sdio_free_func_cis(func); if (func->info) kfree(func->info); diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 0f687cdeb064..549a34144646 100644..100755 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret) EXPORT_SYMBOL_GPL(sdio_readb); /** + * sdio_readb_ext - read a single byte from a SDIO function + * @func: SDIO function to access + * @addr: address to read + * @err_ret: optional status value from transfer + * @in: value to add to argument + * + * Reads a single byte from the address space of a given SDIO + * function. If there is a problem reading the address, 0xff + * is returned and @err_ret will contain the error code. + */ +unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr, + int *err_ret, unsigned in) +{ + int ret; + unsigned char val; + + BUG_ON(!func); + + if (err_ret) + *err_ret = 0; + + ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val); + if (ret) { + if (err_ret) + *err_ret = ret; + return 0xFF; + } + + return val; +} +EXPORT_SYMBOL_GPL(sdio_readb_ext); + +/** * sdio_writeb - write a single byte to a SDIO function * @func: SDIO function to access * @b: byte to write diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 8b4b67c8a391..b3fc1df9e278 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -1,3 +1,10 @@ +config MTD_NAND_IDS + tristate "Include chip ids for known NAND devices." + depends on MTD + help + Useful for NAND drivers that do not use the NAND subsystem but + still like to take advantage of the known chip information. + config MTD_NAND_ECC tristate @@ -122,9 +129,6 @@ config MTD_NAND_OMAP_PREFETCH_DMA or in DMA interrupt mode. 
Say y for DMA mode or MPU mode will be used -config MTD_NAND_IDS - tristate - config MTD_NAND_RICOH tristate "Ricoh xD card reader" default n diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d551ddd9537a..c18d3b8f5162 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3056,6 +3056,44 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, return 0; } +static void nand_panic_wait(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + int i; + + if (chip->state != FL_READY) + for (i = 0; i < 40; i++) { + if (chip->dev_ready(mtd)) + break; + mdelay(10); + } + chip->state = FL_READY; +} + +static int nand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct nand_chip *chip = mtd->priv; + int ret; + + /* Do not allow reads past end of device */ + if ((to + len) > mtd->size) + return -EINVAL; + if (!len) + return 0; + + nand_panic_wait(mtd); + + chip->ops.len = len; + chip->ops.datbuf = (uint8_t *)buf; + chip->ops.oobbuf = NULL; + + ret = nand_do_write_ops(mtd, to, &chip->ops); + + *retlen = chip->ops.retlen; + return ret; +} + /** * nand_scan_tail - [NAND Interface] Scan for the NAND device @@ -3265,6 +3303,7 @@ int nand_scan_tail(struct mtd_info *mtd) mtd->panic_write = panic_nand_write; mtd->read_oob = nand_read_oob; mtd->write_oob = nand_write_oob; + mtd->panic_write = nand_panic_write; mtd->sync = nand_sync; mtd->lock = NULL; mtd->unlock = NULL; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2cc81a54cbf3..e31b62b53d08 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -3219,6 +3219,23 @@ config PPPOL2TP used by ISPs and enterprises to tunnel PPP traffic over UDP tunnels. L2TP is replacing PPTP for VPN uses. +config PPPOLAC + tristate "PPP on L2TP Access Concentrator" + depends on PPP && INET + help + L2TP (RFC 2661) is a tunneling protocol widely used in virtual private + networks. This driver handles L2TP data packets between a UDP socket + and a PPP channel, but only permits one session per socket. Thus it is + fairly simple and suited for clients. + +config PPPOPNS + tristate "PPP on PPTP Network Server" + depends on PPP && INET + help + PPTP (RFC 2637) is a tunneling protocol widely used in virtual private + networks. This driver handles PPTP data packets between a RAW socket + and a PPP channel. It is fairly simple and easy to use. + config SLIP tristate "SLIP (serial line) support" ---help--- diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3e8f150c4b14..90d556afa45d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -162,6 +162,8 @@ obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o obj-$(CONFIG_PPPOE) += pppox.o pppoe.o obj-$(CONFIG_PPPOL2TP) += pppox.o +obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o +obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o diff --git a/drivers/net/pppolac.c b/drivers/net/pppolac.c new file mode 100644 index 000000000000..af3202a920a0 --- /dev/null +++ b/drivers/net/pppolac.c @@ -0,0 +1,380 @@ +/* drivers/net/pppolac.c + * + * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles L2TP data packets between a UDP socket and a PPP channel. + * To keep things simple, only one session per socket is permitted. Packets are + * sent via the socket, so it must keep connected to the same address. One must + * not set sequencing in ICCN but let LNS controll it. Currently this driver + * only works on IPv4 due to the lack of UDP encapsulation support in IPv6. */ + +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/file.h> +#include <linux/netdevice.h> +#include <linux/net.h> +#include <linux/udp.h> +#include <linux/ppp_defs.h> +#include <linux/if_ppp.h> +#include <linux/if_pppox.h> +#include <linux/ppp_channel.h> +#include <net/tcp_states.h> +#include <asm/uaccess.h> + +#define L2TP_CONTROL_BIT 0x80 +#define L2TP_LENGTH_BIT 0x40 +#define L2TP_SEQUENCE_BIT 0x08 +#define L2TP_OFFSET_BIT 0x02 +#define L2TP_VERSION 0x02 +#define L2TP_VERSION_MASK 0x0F + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +union unaligned { + __u32 u32; +} __attribute__((packed)); + +static inline union unaligned *unaligned(void *ptr) +{ + return (union unaligned *)ptr; +} + +static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_udp->sk_user_data; + struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac; + __u8 bits; + __u8 *ptr; + + /* Drop the packet if it is too short. */ + if (skb->len < sizeof(struct udphdr) + 6) + goto drop; + + /* Put it back if it is a control packet. */ + if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT) + return opt->backlog_rcv(sk_udp, skb); + + /* Skip UDP header. */ + skb_pull(skb, sizeof(struct udphdr)); + + /* Check the version. */ + if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION) + goto drop; + bits = skb->data[0]; + ptr = &skb->data[2]; + + /* Check the length if it is present. */ + if (bits & L2TP_LENGTH_BIT) { + if ((ptr[0] << 8 | ptr[1]) != skb->len) + goto drop; + ptr += 2; + } + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) + + (bits & L2TP_LENGTH_BIT ? 2 : 0) + + (bits & L2TP_OFFSET_BIT ? 2 : 0))) + goto drop; + + /* Skip the offset padding if it is present. */ + if (bits & L2TP_OFFSET_BIT && + !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1])) + goto drop; + + /* Check the tunnel and the session. */ + if (unaligned(ptr)->u32 != opt->local) + goto drop; + + /* Check the sequence if it is present. According to RFC 2661 section + * 5.4, the only thing to do is to update opt->sequencing. */ + opt->sequencing = bits & L2TP_SEQUENCE_BIT; + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + skb_push(skb, 1)[0] = 0; + + /* Finally, deliver the packet to PPP channel. 
*/ + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb) +{ + sock_hold(sk_udp); + sk_receive_skb(sk_udp, skb, 0); + return 0; +} + +static struct sk_buff_head delivery_queue; + +static void pppolac_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_udp = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_iov = (struct iovec *)&iov, + .msg_iovlen = 1, + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppolac_xmit_core); + +static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_udp = (struct sock *)chan->private; + struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + + /* Install L2TP header. */ + if (opt->sequencing) { + skb_push(skb, 10); + skb->data[0] = L2TP_SEQUENCE_BIT; + skb->data[6] = opt->sequence >> 8; + skb->data[7] = opt->sequence; + skb->data[8] = 0; + skb->data[9] = 0; + opt->sequence++; + } else { + skb_push(skb, 6); + skb->data[0] = 0; + } + skb->data[1] = L2TP_VERSION; + unaligned(&skb->data[2])->u32 = opt->remote; + + /* Now send the packet via the delivery queue. */ + skb_set_owner_w(skb, sk_udp); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppolac_channel_ops = { + .start_xmit = pppolac_xmit, +}; + +static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr; + struct socket *sock_udp = NULL; + struct sock *sk_udp; + int error; + + if (addrlen != sizeof(struct sockaddr_pppolac) || + !addr->local.tunnel || !addr->local.session || + !addr->remote.tunnel || !addr->remote.session) { + return -EINVAL; + } + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_udp = sockfd_lookup(addr->udp_socket, &error); + if (!sock_udp) + goto out; + sk_udp = sock_udp->sk; + lock_sock(sk_udp); + + /* Remove this check when IPv6 supports UDP encapsulation. 
*/ + error = -EAFNOSUPPORT; + if (sk_udp->sk_family != AF_INET) + goto out; + error = -EPROTONOSUPPORT; + if (sk_udp->sk_protocol != IPPROTO_UDP) + goto out; + error = -EDESTADDRREQ; + if (sk_udp->sk_state != TCP_ESTABLISHED) + goto out; + error = -EBUSY; + if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data) + goto out; + if (!sk_udp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_udp); + error = -ENODEV; + if (!dst) + goto out; + sk_udp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + po->chan.hdrlen = 12; + po->chan.private = sk_udp; + po->chan.ops = &pppolac_channel_ops; + po->chan.mtu = PPP_MTU - 80; + po->proto.lac.local = unaligned(&addr->local)->u32; + po->proto.lac.remote = unaligned(&addr->remote)->u32; + po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP; + udp_sk(sk_udp)->encap_rcv = pppolac_recv; + sk_udp->sk_backlog_rcv = pppolac_recv_core; + sk_udp->sk_user_data = sk; +out: + if (sock_udp) { + release_sock(sk_udp); + if (error) + sockfd_put(sock_udp); + } + release_sock(sk); + return error; +} + +static int pppolac_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_udp); + pppox_unbind_sock(sk); + udp_sk(sk_udp)->encap_type = 0; + udp_sk(sk_udp)->encap_rcv = NULL; + sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv; + sk_udp->sk_user_data = NULL; + release_sock(sk_udp); + sockfd_put(sk_udp->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppolac_proto = { + .name = "PPPOLAC", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppolac_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppolac_release, + .bind = sock_no_bind, + .connect = pppolac_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppolac_create(struct net *net, struct socket *sock) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppolac_proto_ops; + sk->sk_protocol = PX_PROTO_OLAC; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppolac_pppox_proto = { + .create = pppolac_create, + .owner = THIS_MODULE, +}; + +static int __init pppolac_init(void) +{ + int error; + + error = proto_register(&pppolac_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto); + if (error) + proto_unregister(&pppolac_proto); + else + skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppolac_exit(void) +{ + 
unregister_pppox_proto(PX_PROTO_OLAC); + proto_unregister(&pppolac_proto); +} + +module_init(pppolac_init); +module_exit(pppolac_exit); + +MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)"); +MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/pppopns.c b/drivers/net/pppopns.c new file mode 100644 index 000000000000..298097127c90 --- /dev/null +++ b/drivers/net/pppopns.c @@ -0,0 +1,357 @@ +/* drivers/net/pppopns.c + * + * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles PPTP data packets between a RAW socket and a PPP channel. + * The socket is created in the kernel space and connected to the same address + * of the control socket. To keep things simple, packets are always sent with + * sequence but without acknowledgement. This driver should work on both IPv4 + * and IPv6. */ + +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/file.h> +#include <linux/netdevice.h> +#include <linux/net.h> +#include <linux/ppp_defs.h> +#include <linux/if.h> +#include <linux/if_ppp.h> +#include <linux/if_pppox.h> +#include <linux/ppp_channel.h> +#include <asm/uaccess.h> + +#define GRE_HEADER_SIZE 8 + +#define PPTP_GRE_BITS htons(0x2001) +#define PPTP_GRE_BITS_MASK htons(0xEF7F) +#define PPTP_GRE_SEQ_BIT htons(0x1000) +#define PPTP_GRE_ACK_BIT htons(0x0080) +#define PPTP_GRE_TYPE htons(0x880B) + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +struct header { + __u16 bits; + __u16 type; + __u16 length; + __u16 call; + __u32 sequence; +} __attribute__((packed)); + +static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_raw->sk_user_data; + struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns; + struct header *hdr; + + /* Skip transport header */ + skb_pull(skb, skb_transport_header(skb) - skb->data); + + /* Drop the packet if it is too short. */ + if (skb->len < GRE_HEADER_SIZE) + goto drop; + + /* Check the header. */ + hdr = (struct header *)skb->data; + if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local || + (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS) + goto drop; + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, GRE_HEADER_SIZE + + (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) + + (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0))) + goto drop; + + /* Check the length. */ + if (skb->len != ntohs(hdr->length)) + goto drop; + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + skb_push(skb, 1)[0] = 0; + + /* Finally, deliver the packet to PPP channel. 
*/ + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static void pppopns_recv(struct sock *sk_raw, int length) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) { + sock_hold(sk_raw); + sk_receive_skb(sk_raw, skb, 0); + } +} + +static struct sk_buff_head delivery_queue; + +static void pppopns_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_raw = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_iov = (struct iovec *)&iov, + .msg_iovlen = 1, + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppopns_xmit_core); + +static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_raw = (struct sock *)chan->private; + struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns; + struct header *hdr; + __u16 length; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + length = skb->len; + + /* Install PPTP GRE header. */ + hdr = (struct header *)skb_push(skb, 12); + hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT; + hdr->type = PPTP_GRE_TYPE; + hdr->length = htons(length); + hdr->call = opt->remote; + hdr->sequence = htonl(opt->sequence); + opt->sequence++; + + /* Now send the packet via the delivery queue. */ + skb_set_owner_w(skb, sk_raw); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppopns_channel_ops = { + .start_xmit = pppopns_xmit, +}; + +static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr; + struct sockaddr_storage ss; + struct socket *sock_tcp = NULL; + struct socket *sock_raw = NULL; + struct sock *sk_tcp; + struct sock *sk_raw; + int error; + + if (addrlen != sizeof(struct sockaddr_pppopns)) + return -EINVAL; + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_tcp = sockfd_lookup(addr->tcp_socket, &error); + if (!sock_tcp) + goto out; + sk_tcp = sock_tcp->sk; + error = -EPROTONOSUPPORT; + if (sk_tcp->sk_protocol != IPPROTO_TCP) + goto out; + addrlen = sizeof(struct sockaddr_storage); + error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen); + if (error) + goto out; + if (!sk_tcp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_tcp); + error = -ENODEV; + if (!dst) + goto out; + sk_tcp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw); + if (error) + goto out; + sk_raw = sock_raw->sk; + sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if; + error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0); + if (error) + goto out; + + po->chan.hdrlen = 14; + po->chan.private = sk_raw; + po->chan.ops = &pppopns_channel_ops; + po->chan.mtu = PPP_MTU - 80; + po->proto.pns.local = addr->local; + po->proto.pns.remote = addr->remote; + 
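+	/* Save the raw socket's original callbacks so pppopns_release() can
+	 * restore them when the channel is torn down.
+	 *
+	 * For illustration only: a hypothetical userspace caller (with ctrl_fd
+	 * being its connected PPTP control socket) reaches this point roughly
+	 * as below.  Only tcp_socket/local/remote are referenced by this file;
+	 * the remaining field names are assumed from the if_pppopns.h header,
+	 * not defined here.
+	 *
+	 *	struct sockaddr_pppopns sa = {
+	 *		.sa_family   = AF_PPPOX,
+	 *		.sa_protocol = PX_PROTO_OPNS,
+	 *		.tcp_socket  = ctrl_fd,
+	 *		.local       = local_call_id,
+	 *		.remote      = peer_call_id,
+	 *	};
+	 *	connect(pppox_fd, (struct sockaddr *)&sa, sizeof(sa));
+	 */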
po->proto.pns.data_ready = sk_raw->sk_data_ready; + po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + lock_sock(sk_raw); + sk_raw->sk_data_ready = pppopns_recv; + sk_raw->sk_backlog_rcv = pppopns_recv_core; + sk_raw->sk_user_data = sk; + release_sock(sk_raw); +out: + if (sock_tcp) + sockfd_put(sock_tcp); + if (error && sock_raw) + sock_release(sock_raw); + release_sock(sk); + return error; +} + +static int pppopns_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_raw); + pppox_unbind_sock(sk); + sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready; + sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv; + sk_raw->sk_user_data = NULL; + release_sock(sk_raw); + sock_release(sk_raw->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppopns_proto = { + .name = "PPPOPNS", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppopns_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppopns_release, + .bind = sock_no_bind, + .connect = pppopns_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppopns_create(struct net *net, struct socket *sock) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppopns_proto_ops; + sk->sk_protocol = PX_PROTO_OPNS; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppopns_pppox_proto = { + .create = pppopns_create, + .owner = THIS_MODULE, +}; + +static int __init pppopns_init(void) +{ + int error; + + error = proto_register(&pppopns_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto); + if (error) + proto_unregister(&pppopns_proto); + else + skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppopns_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OPNS); + proto_unregister(&pppopns_proto); +} + +module_init(pppopns_init); +module_exit(pppopns_exit); + +MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)"); +MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 174e3442d519..a37a8293f43b 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -271,6 +271,7 @@ config MWL8K source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/b43/Kconfig" source "drivers/net/wireless/b43legacy/Kconfig" +source "drivers/net/wireless/bcm4329/Kconfig" source 
"drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/ipw2x00/Kconfig" source "drivers/net/wireless/iwlwifi/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 5d4ce4d2b32b..06cf827833cf 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -52,3 +52,5 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o obj-$(CONFIG_WL12XX) += wl12xx/ obj-$(CONFIG_IWM) += iwmc3200wifi/ + +obj-$(CONFIG_BCM4329) += bcm4329/ diff --git a/drivers/net/wireless/bcm4329/Kconfig b/drivers/net/wireless/bcm4329/Kconfig new file mode 100644 index 000000000000..ca5760d32385 --- /dev/null +++ b/drivers/net/wireless/bcm4329/Kconfig @@ -0,0 +1,27 @@ +config BCM4329 + tristate "Broadcom 4329 wireless cards support" + depends on MMC + select WIRELESS_EXT + select WEXT_PRIV + ---help--- + This module adds support for wireless adapters based on + Broadcom 4329 chipset. + + This driver uses the kernel's wireless extensions subsystem. + + If you choose to build a module, it'll be called dhd. Say M if + unsure. + +config BCM4329_FW_PATH + depends on BCM4329 + string "Firmware path" + default "/system/etc/firmware/fw_bcm4329.bin" + ---help--- + Path to the firmware file. + +config BCM4329_NVRAM_PATH + depends on BCM4329 + string "NVRAM path" + default "/proc/calibration" + ---help--- + Path to the calibration file. diff --git a/drivers/net/wireless/bcm4329/Makefile b/drivers/net/wireless/bcm4329/Makefile new file mode 100644 index 000000000000..d0bc0b4806fc --- /dev/null +++ b/drivers/net/wireless/bcm4329/Makefile @@ -0,0 +1,20 @@ +# bcm4329 +DHDCFLAGS = -DLINUX -DBCMDRIVER -DBCMDONGLEHOST -DDHDTHREAD -DBCMWPA2 \ + -DUNRELEASEDCHIP -Dlinux -DDHD_SDALIGN=64 -DMAX_HDR_READ=64 \ + -DDHD_FIRSTREAD=64 -DDHD_GPL -DDHD_SCHED -DBDC -DTOE -DDHD_BCMEVENTS \ + -DSHOW_EVENTS -DBCMSDIO -DDHD_GPL -DBCMLXSDMMC -DBCMPLATFORM_BUS \ + -Wall -Wstrict-prototypes -Werror -DOOB_INTR_ONLY -DCUSTOMER_HW2 \ + -DDHD_USE_STATIC_BUF -DMMC_SDIO_ABORT -DDHD_DEBUG_TRAP -DSOFTAP \ + -DEMBEDDED_PLATFORM -DARP_OFFLOAD_SUPPORT -DPKT_FILTER_SUPPORT \ + -DGET_CUSTOM_MAC_ENABLE -DSET_RANDOM_MAC_SOFTAP -DCSCAN -DHW_OOB \ + -Idrivers/net/wireless/bcm4329 -Idrivers/net/wireless/bcm4329/include + +DHDOFILES = dhd_linux.o linux_osl.o bcmutils.o dhd_common.o dhd_custom_gpio.o \ + wl_iw.o siutils.o sbutils.o aiutils.o hndpmu.o bcmwifi.o dhd_sdio.o \ + dhd_linux_sched.o dhd_cdc.o bcmsdh_sdmmc.o bcmsdh.o bcmsdh_linux.o \ + bcmsdh_sdmmc_linux.o + +obj-$(CONFIG_BCM4329) += bcm4329.o +bcm4329-objs += $(DHDOFILES) +EXTRA_CFLAGS = $(DHDCFLAGS) +EXTRA_LDFLAGS += --strip-debug diff --git a/drivers/net/wireless/bcm4329/aiutils.c b/drivers/net/wireless/bcm4329/aiutils.c new file mode 100644 index 000000000000..df48ac0d83d4 --- /dev/null +++ b/drivers/net/wireless/bcm4329/aiutils.c @@ -0,0 +1,686 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: aiutils.c,v 1.6.4.7.4.6 2010/04/21 20:43:47 Exp $ + */ + +#include <typedefs.h> +#include <bcmdefs.h> +#include <osl.h> +#include <bcmutils.h> +#include <siutils.h> +#include <hndsoc.h> +#include <sbchipc.h> +#include <pcicfg.h> + +#include "siutils_priv.h" + +STATIC uint32 +get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st, + uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh); + + +/* EROM parsing */ + +static uint32 +get_erom_ent(si_t *sih, uint32 *eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + + while (TRUE) { + ent = R_REG(si_osh(sih), (uint32 *)(uintptr)(*eromptr)); + *eromptr += sizeof(uint32); + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + nom++; + } + + SI_MSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); + if (inv + nom) + SI_MSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + return ent; +} + +STATIC uint32 +get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st, + uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + *eromptr -= sizeof(uint32); + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_MSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +/* parse the enumeration rom to identify all cores */ +void +ai_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, eromptr, eromlim; + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uintptr)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + /* Set wrappers address */ 
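+		/* The AI wrapper registers are mapped in the SI_CORE_SIZE
+		 * bytes that follow the core registers in the BAR0 window,
+		 * so curwrap can be derived from regs directly. */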
+ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* Now point the window at the erom */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = (uint32)(uintptr)regs; + break; + + case SPI_BUS: + case SDIO_BUS: + eromptr = erombase; + break; + + case PCMCIA_BUS: + default: + SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + ER_REMAPCONTROL; + + SI_MSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%08x, eromlim = 0x%08x\n", + regs, erombase, eromptr, eromlim)); + while (eromptr < eromlim) { + uint32 cia, cib, base, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + uint i, j, idx; + bool br; + + br = FALSE; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_MSG(("Found END of erom after %d cores\n", sii->numcores)); + return; + } + base = eromptr - sizeof(uint32); + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + SI_MSG(("Found component 0x%04x/0x%4x rev %d at erom addr 0x%08x, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, base, nmw, nsw, nmp, nsp)); + + if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) + continue; + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + sii->common_info->oob_router = addrl; + } + } + continue; + } + + idx = sii->numcores; +/* sii->eromptr[idx] = base; */ + sii->common_info->cia[idx] = cia; + sii->common_info->cib[idx] = cib; + sii->common_info->coreid[idx] = cid; + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + SI_MSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the core + */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else + if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("First Slave ASD for core 0x%04x malformed " + "(0x%08x)\n", cid, asd)); + goto error; + } + } + sii->common_info->coresba[idx] = addrl; + sii->common_info->coresba_size[idx] = sizel; + /* Get any more ASDs in port 0 */ + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) + sii->common_info->coresba2[idx] = addrl; + sii->common_info->coresba2_size[idx] = sizel; + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, 
&eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + } while (asd != 0); + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) + sii->common_info->wrapba[idx] = addrl; + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + if ((nmw == 0) && (i == 0)) + sii->common_info->wrapba[idx] = addrl; + } + + /* Don't record bridges */ + if (br) + continue; + + /* Done with core */ + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END")); + +error: + sii->numcores = 0; + return; +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + uint32 addr = sii->common_info->coresba[coreidx]; + uint32 wrap = sii->common_info->wrapba[coreidx]; + void *regs; + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + /* map new one */ + if (!sii->common_info->regs[coreidx]) { + sii->common_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->common_info->regs[coreidx])); + } + sii->curmap = regs = sii->common_info->regs[coreidx]; + if (!sii->common_info->wrappers[coreidx]) { + sii->common_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->common_info->wrappers[coreidx])); + } + sii->curwrap = sii->common_info->wrappers[coreidx]; + break; + + + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + sii->curwrap = (void *)((uintptr)wrap); + break; + + case PCMCIA_BUS: + default: + ASSERT(0); + regs = NULL; + break; + } + + sii->curmap = regs; + sii->curidx = coreidx; + + return regs; +} + +/* Return the number of address spaces in current core */ +int +ai_numaddrspaces(si_t *sih) +{ + return 2; +} + +/* Return the address of the nth address space in the current core */ +uint32 +ai_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + uint cidx; + + sii = SI_INFO(sih); + cidx = sii->curidx; + + if (asidx == 0) + return sii->common_info->coresba[cidx]; + else if (asidx == 1) + return sii->common_info->coresba2[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +/* Return the size of the nth address space in the current core */ +uint32 +ai_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + uint cidx; + + sii = SI_INFO(sih); + cidx = sii->curidx; + + if (asidx == 0) + return sii->common_info->coresba_size[cidx]; + else if (asidx == 1) + return sii->common_info->coresba2_size[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +uint +ai_flag(si_t *sih) +{ + si_info_t *sii; + aidmp_t *ai; + + sii = SI_INFO(sih); + ai = sii->curwrap; + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +void +ai_setint(si_t *sih, int siflag) +{ +} + +void +ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai = sii->curwrap; + W_REG(sii->osh, (uint32 *)((uint8 *)ai+offset), val); + return; +} + +uint +ai_corevendor(si_t *sih) +{ + si_info_t *sii; + uint32 cia; + + sii = SI_INFO(sih); + cia = sii->common_info->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +ai_corerev(si_t *sih) +{ + si_info_t *sii; + uint32 cib; + + sii = SI_INFO(sih); + cib = sii->common_info->cib[sii->curidx]; + return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +bool +ai_iscoreup(si_t *sih) +{ + si_info_t *sii; + aidmp_t *ai; + + sii = SI_INFO(sih); + ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fidleing with interrupts or core switches are needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. 
+ */ +uint +ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii; + + sii = SI_INFO(sih); + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!sii->common_info->wrappers[coreidx]) { + sii->common_info->regs[coreidx] = + REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE); + ASSERT(GOODREGS(sii->common_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + /* readback */ + w = R_REG(sii->osh, r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +void +ai_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + aidmp_t *ai; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) + return; + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + OSL_DELAY(10); + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + aidmp_t *ai; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* + * Must do the disable sequence first to work for arbitrary current core state. + */ + ai_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. 
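+	 * Force the gated clocks on (SICF_FGC | SICF_CLOCK_EN) while releasing
+	 * resetctrl, then rewrite ioctrl with only SICF_CLOCK_EN so the core
+	 * runs on its normal gated clock.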
+ */ + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + W_REG(sii->osh, &ai->resetctrl, 0); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + OSL_DELAY(1); +} + + +void +ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +ai_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, &ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} diff --git a/drivers/net/wireless/bcm4329/bcmpcispi.c b/drivers/net/wireless/bcm4329/bcmpcispi.c new file mode 100644 index 000000000000..1a8b6717f924 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmpcispi.c @@ -0,0 +1,630 @@ +/* + * Broadcom SPI over PCI-SPI Host Controller, low-level hardware driver + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmpcispi.c,v 1.22.2.4.4.5.6.1 2010/08/13 00:26:05 Exp $ + */ + +#include <typedefs.h> +#include <bcmutils.h> + +#include <sdio.h> /* SDIO Specs */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* to get msglevel bit values */ + +#include <pcicfg.h> +#include <bcmsdspi.h> +#include <bcmspi.h> +#include <bcmpcispi.h> /* BRCM PCI-SPI Host Controller Register definitions */ + + +/* ndis_osl.h needs to do a runtime check of the osh to map + * R_REG/W_REG to bus specific access similar to linux_osl.h. + * Until then... 
+ */ +/* linux */ + +#define SPIPCI_RREG R_REG +#define SPIPCI_WREG W_REG + + +#define SPIPCI_ANDREG(osh, r, v) SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) & (v))) +#define SPIPCI_ORREG(osh, r, v) SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) | (v))) + + +int bcmpcispi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */ + +typedef struct spih_info_ { + uint bar0; /* BAR0 of PCI Card */ + uint bar1; /* BAR1 of PCI Card */ + osl_t *osh; /* osh handle */ + spih_pciregs_t *pciregs; /* PCI Core Registers */ + spih_regs_t *regs; /* SPI Controller Registers */ + uint8 rev; /* PCI Card Revision ID */ +} spih_info_t; + + +/* Attach to PCI-SPI Host Controller Hardware */ +bool +spi_hw_attach(sdioh_info_t *sd) +{ + osl_t *osh; + spih_info_t *si; + + sd_trace(("%s: enter\n", __FUNCTION__)); + + osh = sd->osh; + + if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return FALSE; + } + + bzero(si, sizeof(spih_info_t)); + + sd->controller = si; + + si->osh = sd->osh; + si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF; + + if (si->rev < 3) { + sd_err(("Host controller %d not supported, please upgrade to rev >= 3\n", si->rev)); + MFREE(osh, si, sizeof(spih_info_t)); + return (FALSE); + } + + sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev)); + + /* FPGA Revision < 3 not supported by driver anymore. */ + ASSERT(si->rev >= 3); + + si->bar0 = sd->bar0; + + /* Rev < 10 PciSpiHost has 2 BARs: + * BAR0 = PCI Core Registers + * BAR1 = PciSpiHost Registers (all other cores on backplane) + * + * Rev 10 and up use a different PCI core which only has a single + * BAR0 which contains the PciSpiHost Registers. + */ + if (si->rev < 10) { + si->pciregs = (spih_pciregs_t *)spi_reg_map(osh, + (uintptr)si->bar0, + sizeof(spih_pciregs_t)); + sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs)); + + si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4); + si->regs = (spih_regs_t *)spi_reg_map(osh, + (uintptr)si->bar1, + sizeof(spih_regs_t)); + sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs)); + } else { + si->regs = (spih_regs_t *)spi_reg_map(osh, + (uintptr)si->bar0, + sizeof(spih_regs_t)); + sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs)); + si->pciregs = NULL; + } + /* Enable SPI Controller, 16.67MHz SPI Clock */ + SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1); + + /* Set extended feature register to defaults */ + SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000); + + /* Set GPIO CS# High (de-asserted) */ + SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS); + + /* set GPIO[0] to output for CS# */ + /* set GPIO[1] to output for power control */ + /* set GPIO[2] to input for card detect */ + SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER)); + + /* Clear out the Read FIFO in case there is any stuff left in there from a previous run. 
*/ + while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) { + SPIPCI_RREG(osh, &si->regs->spih_data); + } + + /* Wait for power to stabilize to the SDIO Card (100msec was insufficient) */ + OSL_DELAY(250000); + + /* Check card detect on FPGA Revision >= 4 */ + if (si->rev >= 4) { + if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) { + sd_err(("%s: no card detected in SD slot\n", __FUNCTION__)); + spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t)); + if (si->pciregs) { + spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t)); + } + MFREE(osh, si, sizeof(spih_info_t)); + return FALSE; + } + } + + /* Interrupts are level sensitive */ + SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000); + + /* Interrupts are active low. */ + SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004); + + /* Enable interrupts through PCI Core. */ + if (si->pciregs) { + SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN); + } + + sd_trace(("%s: exit\n", __FUNCTION__)); + return TRUE; +} + +/* Detach and return PCI-SPI Hardware to unconfigured state */ +bool +spi_hw_detach(sdioh_info_t *sd) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + spih_pciregs_t *pciregs = si->pciregs; + + sd_trace(("%s: enter\n", __FUNCTION__)); + + SPIPCI_WREG(osh, ®s->spih_ctrl, 0x00000010); + SPIPCI_WREG(osh, ®s->spih_gpio_ctrl, 0x00000000); /* Disable GPIO for CS# */ + SPIPCI_WREG(osh, ®s->spih_int_mask, 0x00000000); /* Clear Intmask */ + SPIPCI_WREG(osh, ®s->spih_hex_disp, 0x0000DEAF); + SPIPCI_WREG(osh, ®s->spih_int_edge, 0x00000000); + SPIPCI_WREG(osh, ®s->spih_int_pol, 0x00000000); + SPIPCI_WREG(osh, ®s->spih_hex_disp, 0x0000DEAD); + + /* Disable interrupts through PCI Core. */ + if (si->pciregs) { + SPIPCI_WREG(osh, &pciregs->ICR, 0x00000000); + spi_reg_unmap(osh, (uintptr)pciregs, sizeof(spih_pciregs_t)); + } + spi_reg_unmap(osh, (uintptr)regs, sizeof(spih_regs_t)); + + MFREE(osh, si, sizeof(spih_info_t)); + + sd->controller = NULL; + + sd_trace(("%s: exit\n", __FUNCTION__)); + return TRUE; +} + +/* Switch between internal (PCI) and external clock oscillator */ +static bool +sdspi_switch_clock(sdioh_info_t *sd, bool ext_clk) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + + /* Switch to desired clock, and reset the PLL. */ + SPIPCI_WREG(osh, ®s->spih_pll_ctrl, ext_clk ? SPIH_EXT_CLK : 0); + + SPINWAIT(((SPIPCI_RREG(osh, ®s->spih_pll_status) & SPIH_PLL_LOCKED) + != SPIH_PLL_LOCKED), 1000); + if ((SPIPCI_RREG(osh, ®s->spih_pll_status) & SPIH_PLL_LOCKED) != SPIH_PLL_LOCKED) { + sd_err(("%s: timeout waiting for PLL to lock\n", __FUNCTION__)); + return (FALSE); + } + return (TRUE); + +} + +/* Configure PCI-SPI Host Controller's SPI Clock rate as a divisor into the + * base clock rate. The base clock is either the PCI Clock (33MHz) or the + * external clock oscillator at U17 on the PciSpiHost. + */ +bool +spi_start_clock(sdioh_info_t *sd, uint16 div) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + uint32 t, espr, disp; + uint32 disp_xtal_freq; + bool ext_clock = FALSE; + char disp_string[5]; + + if (div > 2048) { + sd_err(("%s: divisor %d too large; using max of 2048\n", __FUNCTION__, div)); + div = 2048; + } else if (div & (div - 1)) { /* Not a power of 2? 
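+					   (e.g. div = 100: the loop below ORs in
+					   right-shifted copies until the low bits
+					   are all set, giving 127, and div++ then
+					   lands on the next power of two, 128)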
*/ + /* Round up to a power of 2 */ + while ((div + 1) & div) + div |= div >> 1; + div++; + } + + /* For FPGA Rev >= 5, the use of an external clock oscillator is supported. + * If the oscillator is populated, use it to provide the SPI base clock, + * otherwise, default to the PCI clock as the SPI base clock. + */ + if (si->rev >= 5) { + uint32 clk_tick; + /* Enable the External Clock Oscillator as PLL clock source. */ + if (!sdspi_switch_clock(sd, TRUE)) { + sd_err(("%s: error switching to external clock\n", __FUNCTION__)); + } + + /* Check to make sure the external clock is running. If not, then it + * is not populated on the card, so we will default to the PCI clock. + */ + clk_tick = SPIPCI_RREG(osh, ®s->spih_clk_count); + if (clk_tick == SPIPCI_RREG(osh, ®s->spih_clk_count)) { + + /* Switch back to the PCI clock as the clock source. */ + if (!sdspi_switch_clock(sd, FALSE)) { + sd_err(("%s: error switching to external clock\n", __FUNCTION__)); + } + } else { + ext_clock = TRUE; + } + } + + /* Hack to allow hot-swapping oscillators: + * 1. Force PCI clock as clock source, using sd_divisor of 0. + * 2. Swap oscillator + * 3. Set desired sd_divisor (will switch to external oscillator as clock source. + */ + if (div == 0) { + ext_clock = FALSE; + div = 2; + + /* Select PCI clock as the clock source. */ + if (!sdspi_switch_clock(sd, FALSE)) { + sd_err(("%s: error switching to external clock\n", __FUNCTION__)); + } + + sd_err(("%s: Ok to hot-swap oscillators.\n", __FUNCTION__)); + } + + /* If using the external oscillator, read the clock frequency from the controller + * The value read is in units of 10000Hz, and it's not a nice round number because + * it is calculated by the FPGA. So to make up for that, we round it off. + */ + if (ext_clock == TRUE) { + uint32 xtal_freq; + + OSL_DELAY(1000); + xtal_freq = SPIPCI_RREG(osh, ®s->spih_xtal_freq) * 10000; + + sd_info(("%s: Oscillator is %dHz\n", __FUNCTION__, xtal_freq)); + + + disp_xtal_freq = xtal_freq / 10000; + + /* Round it off to a nice number. */ + if ((disp_xtal_freq % 100) > 50) { + disp_xtal_freq += 100; + } + + disp_xtal_freq = (disp_xtal_freq / 100) * 100; + } else { + sd_err(("%s: no external oscillator installed, using PCI clock.\n", __FUNCTION__)); + disp_xtal_freq = 3333; + } + + /* Convert the SPI Clock frequency to BCD format. */ + sprintf(disp_string, "%04d", disp_xtal_freq / div); + + disp = (disp_string[0] - '0') << 12; + disp |= (disp_string[1] - '0') << 8; + disp |= (disp_string[2] - '0') << 4; + disp |= (disp_string[3] - '0'); + + /* Select the correct ESPR register value based on the divisor. */ + switch (div) { + case 1: espr = 0x0; break; + case 2: espr = 0x1; break; + case 4: espr = 0x2; break; + case 8: espr = 0x5; break; + case 16: espr = 0x3; break; + case 32: espr = 0x4; break; + case 64: espr = 0x6; break; + case 128: espr = 0x7; break; + case 256: espr = 0x8; break; + case 512: espr = 0x9; break; + case 1024: espr = 0xa; break; + case 2048: espr = 0xb; break; + default: espr = 0x0; ASSERT(0); break; + } + + t = SPIPCI_RREG(osh, ®s->spih_ctrl); + t &= ~3; + t |= espr & 3; + SPIPCI_WREG(osh, ®s->spih_ctrl, t); + + t = SPIPCI_RREG(osh, ®s->spih_ext); + t &= ~3; + t |= (espr >> 2) & 3; + SPIPCI_WREG(osh, ®s->spih_ext, t); + + SPIPCI_WREG(osh, ®s->spih_hex_disp, disp); + + /* For Rev 8, writing to the PLL_CTRL register resets + * the PLL, and it can re-acquire in 200uS. For + * Rev 7 and older, we use a software delay to allow + * the PLL to re-acquire, which takes more than 2mS. 
+ */ + if (si->rev < 8) { + /* Wait for clock to settle. */ + OSL_DELAY(5000); + } + + sd_info(("%s: SPI_CTRL=0x%08x SPI_EXT=0x%08x\n", + __FUNCTION__, + SPIPCI_RREG(osh, ®s->spih_ctrl), + SPIPCI_RREG(osh, ®s->spih_ext))); + + return TRUE; +} + +/* Configure PCI-SPI Host Controller High-Speed Clocking mode setting */ +bool +spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + + if (si->rev >= 10) { + if (hsmode) { + SPIPCI_ORREG(osh, ®s->spih_ext, 0x10); + } else { + SPIPCI_ANDREG(osh, ®s->spih_ext, ~0x10); + } + } + + return TRUE; +} + +/* Disable device interrupt */ +void +spi_devintr_off(sdioh_info_t *sd) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + sd->intmask &= ~SPIH_DEV_INTR; + SPIPCI_WREG(osh, ®s->spih_int_mask, sd->intmask); /* Clear Intmask */ + } +} + +/* Enable device interrupt */ +void +spi_devintr_on(sdioh_info_t *sd) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + + ASSERT(sd->lockcount == 0); + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + if (SPIPCI_RREG(osh, ®s->spih_ctrl) & 0x02) { + /* Ack in case one was pending but is no longer... */ + SPIPCI_WREG(osh, ®s->spih_int_status, SPIH_DEV_INTR); + } + sd->intmask |= SPIH_DEV_INTR; + /* Set device intr in Intmask */ + SPIPCI_WREG(osh, ®s->spih_int_mask, sd->intmask); + } +} + +/* Check to see if an interrupt belongs to the PCI-SPI Host or a SPI Device */ +bool +spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + bool ours = FALSE; + + uint32 raw_int, cur_int; + ASSERT(sd); + + if (is_dev_intr) + *is_dev_intr = FALSE; + raw_int = SPIPCI_RREG(osh, ®s->spih_int_status); + cur_int = raw_int & sd->intmask; + if (cur_int & SPIH_DEV_INTR) { + if (sd->client_intr_enabled && sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + if (is_dev_intr) + *is_dev_intr = TRUE; + } else { + sd_trace(("%s: Not ready for intr: enabled %d, handler 0x%p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + SPIPCI_WREG(osh, ®s->spih_int_status, SPIH_DEV_INTR); + SPIPCI_RREG(osh, ®s->spih_int_status); + ours = TRUE; + } else if (cur_int & SPIH_CTLR_INTR) { + /* Interrupt is from SPI FIFO... just clear and ack it... */ + sd_trace(("%s: SPI CTLR interrupt: raw_int 0x%08x cur_int 0x%08x\n", + __FUNCTION__, raw_int, cur_int)); + + /* Clear the interrupt in the SPI_STAT register */ + SPIPCI_WREG(osh, ®s->spih_stat, 0x00000080); + + /* Ack the interrupt in the interrupt controller */ + SPIPCI_WREG(osh, ®s->spih_int_status, SPIH_CTLR_INTR); + SPIPCI_RREG(osh, ®s->spih_int_status); + + ours = TRUE; + } else if (cur_int & SPIH_WFIFO_INTR) { + sd_trace(("%s: SPI WR FIFO Empty interrupt: raw_int 0x%08x cur_int 0x%08x\n", + __FUNCTION__, raw_int, cur_int)); + + /* Disable the FIFO Empty Interrupt */ + sd->intmask &= ~SPIH_WFIFO_INTR; + SPIPCI_WREG(osh, ®s->spih_int_mask, sd->intmask); + + sd->local_intrcount++; + sd->got_hcint = TRUE; + ours = TRUE; + } else { + /* Not an error: can share interrupts... 
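+		 * The FALSE return value below tells the caller that this
+		 * interrupt did not come from the SPI host, which matters
+		 * when the PCI interrupt line is shared with other devices.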
*/ + sd_trace(("%s: Not my interrupt: raw_int 0x%08x cur_int 0x%08x\n", + __FUNCTION__, raw_int, cur_int)); + ours = FALSE; + } + + return ours; +} + +static void +hexdump(char *pfx, unsigned char *msg, int msglen) +{ + int i, col; + char buf[80]; + + ASSERT(strlen(pfx) + 49 <= sizeof(buf)); + + col = 0; + + for (i = 0; i < msglen; i++, col++) { + if (col % 16 == 0) + strcpy(buf, pfx); + sprintf(buf + strlen(buf), "%02x", msg[i]); + if ((col + 1) % 16 == 0) + printf("%s\n", buf); + else + sprintf(buf + strlen(buf), " "); + } + + if (col % 16 != 0) + printf("%s\n", buf); +} + +/* Send/Receive an SPI Packet */ +void +spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + uint32 count; + uint32 spi_data_out; + uint32 spi_data_in; + bool yield; + + sd_trace(("%s: enter\n", __FUNCTION__)); + + if (bcmpcispi_dump) { + printf("SENDRECV(len=%d)\n", msglen); + hexdump(" OUT: ", msg_out, msglen); + } + +#ifdef BCMSDYIELD + /* Only yield the CPU and wait for interrupt on Rev 8 and newer FPGA images. */ + yield = ((msglen > 500) && (si->rev >= 8)); +#else + yield = FALSE; +#endif /* BCMSDYIELD */ + + ASSERT(msglen % 4 == 0); + + + SPIPCI_ANDREG(osh, ®s->spih_gpio_data, ~SPIH_CS); /* Set GPIO CS# Low (asserted) */ + + for (count = 0; count < (uint32)msglen/4; count++) { + spi_data_out = ((uint32)((uint32 *)msg_out)[count]); + SPIPCI_WREG(osh, ®s->spih_data, spi_data_out); + } + +#ifdef BCMSDYIELD + if (yield) { + /* Ack the interrupt in the interrupt controller */ + SPIPCI_WREG(osh, ®s->spih_int_status, SPIH_WFIFO_INTR); + SPIPCI_RREG(osh, ®s->spih_int_status); + + /* Enable the FIFO Empty Interrupt */ + sd->intmask |= SPIH_WFIFO_INTR; + sd->got_hcint = FALSE; + SPIPCI_WREG(osh, ®s->spih_int_mask, sd->intmask); + + } +#endif /* BCMSDYIELD */ + + /* Wait for write fifo to empty... */ + SPIPCI_ANDREG(osh, ®s->spih_gpio_data, ~0x00000020); /* Set GPIO 5 Low */ + + if (yield) { + ASSERT((SPIPCI_RREG(sd->osh, ®s->spih_stat) & SPIH_WFEMPTY) == 0); + } + + spi_waitbits(sd, yield); + SPIPCI_ORREG(osh, ®s->spih_gpio_data, 0x00000020); /* Set GPIO 5 High (de-asserted) */ + + for (count = 0; count < (uint32)msglen/4; count++) { + spi_data_in = SPIPCI_RREG(osh, ®s->spih_data); + ((uint32 *)msg_in)[count] = spi_data_in; + } + + /* Set GPIO CS# High (de-asserted) */ + SPIPCI_ORREG(osh, ®s->spih_gpio_data, SPIH_CS); + + if (bcmpcispi_dump) { + hexdump(" IN : ", msg_in, msglen); + } +} + +void +spi_spinbits(sdioh_info_t *sd) +{ + spih_info_t *si = (spih_info_t *)sd->controller; + osl_t *osh = si->osh; + spih_regs_t *regs = si->regs; + uint spin_count; /* Spin loop bound check */ + + spin_count = 0; + while ((SPIPCI_RREG(sd->osh, ®s->spih_stat) & SPIH_WFEMPTY) == 0) { + if (spin_count > SPI_SPIN_BOUND) { + sd_err(("%s: SPIH_WFEMPTY spin bits out of bound %u times \n", + __FUNCTION__, spin_count)); + ASSERT(FALSE); + } + spin_count++; + } + + /* Wait for SPI Transfer state machine to return to IDLE state. + * The state bits are only implemented in Rev >= 5 FPGA. These + * bits are hardwired to 00 for Rev < 5, so this check doesn't cause + * any problems. 
+ */ + spin_count = 0; + while ((SPIPCI_RREG(osh, ®s->spih_stat) & SPIH_STATE_MASK) != 0) { + if (spin_count > SPI_SPIN_BOUND) { + sd_err(("%s: SPIH_STATE_MASK spin bits out of bound %u times \n", + __FUNCTION__, spin_count)); + ASSERT(FALSE); + } + spin_count++; + } +} diff --git a/drivers/net/wireless/bcm4329/bcmsdh.c b/drivers/net/wireless/bcm4329/bcmsdh.c new file mode 100644 index 000000000000..4bf5889e5a68 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdh.c @@ -0,0 +1,652 @@ +/* + * BCMSDH interface glue + * implement bcmsdh API for SDIOH driver + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh.c,v 1.35.2.1.4.8.6.13 2010/04/06 03:26:57 Exp $ + */ +/* ****************** BCMSDH Interface Functions *************************** */ + +#include <typedefs.h> +#include <bcmdevs.h> +#include <bcmendian.h> +#include <bcmutils.h> +#include <hndsoc.h> +#include <siutils.h> +#include <osl.h> + +#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */ +#include <bcmsdbus.h> /* common SDIO/controller interface */ +#include <sbsdio.h> /* BRCM sdio device core */ + +#include <sdio.h> /* sdio spec */ + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 +const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; + + +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ +}; +/* local copy of bcm sd handler */ +bcmsdh_info_t * l_bcmsdh = NULL; + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern int +sdioh_enable_hw_oob_intr(void *sdioh, bool enable); + +void +bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) +{ + sdioh_enable_hw_oob_intr(sdh->sdioh, enable); +} +#endif + +bcmsdh_info_t * +bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq) +{ + bcmsdh_info_t *bcmsdh; + + if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { + BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); + + /* save the handler locally */ + l_bcmsdh = bcmsdh; + + if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) { + bcmsdh_detach(osh, bcmsdh); + return NULL; + } + + bcmsdh->osh = osh; + bcmsdh->init_success = TRUE; + + *regsva = 
(uint32 *)SI_ENUM_BASE; + + /* Report the BAR, to fix if needed */ + bcmsdh->sbwad = SI_ENUM_BASE; + return bcmsdh; +} + +int +bcmsdh_detach(osl_t *osh, void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bcmsdh != NULL) { + if (bcmsdh->sdioh) { + sdioh_detach(osh, bcmsdh->sdioh); + bcmsdh->sdioh = NULL; + } + MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); + } + + l_bcmsdh = NULL; + return 0; +} + +int +bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); +} + +bool +bcmsdh_intr_query(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + bool on; + + ASSERT(bcmsdh); + status = sdioh_interrupt_query(bcmsdh->sdioh, &on); + if (SDIOH_API_SUCCESS(status)) + return FALSE; + else + return on; +} + +int +bcmsdh_intr_enable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_disable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_dereg(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_deregister(bcmsdh->sdioh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +#if defined(DHD_DEBUG) +bool +bcmsdh_intr_pending(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + ASSERT(sdh); + return sdioh_interrupt_pending(bcmsdh->sdioh); +} +#endif + + +int +bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + ASSERT(sdh); + + /* don't support yet */ + return BCME_UNSUPPORTED; +} + +uint8 +bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + uint8 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 
0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR; + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); +} + +uint32 +bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, + addr, data)); +} + + +int +bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + uint8 *tmp_buf, *tmp_ptr; + uint8 *ptr; + bool ascii = func & ~0xf; + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cis); + ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); + + status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); + + if (ascii) { + /* Move binary bits to tmp and format them into the provided buffer. */ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += sprintf((char *)ptr, "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + + +static int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address) +{ + int err = 0; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uint32 addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bar0 != bcmsdh->sbwad) { + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)) + return 0xFFFFFFFF; + + bcmsdh->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bar0 != bcmsdh->sbwad) { + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) + return err; + + bcmsdh->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if (bar0 != bcmsdh->sbwad) { + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) + return err; + + bcmsdh->sbwad = bar0; + } + + 
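+	/* With the backplane window now pointed at bar0, only the low offset
+	 * bits of the address travel in the SDIO request; the 4-byte-access
+	 * flag is OR'ed in below when a 32-bit wide transfer is requested. */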
addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if (bar0 != bcmsdh->sbwad) { + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) + return err; + + bcmsdh->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. 
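+ * In this SDIO/MMC port the call is a stub and always reports 0; a bus
+ * layer that can snoop the device-status bits would return them here.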
*/ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ + return 0; +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ + return; +} diff --git a/drivers/net/wireless/bcm4329/bcmsdh_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_linux.c new file mode 100644 index 000000000000..559f4819b1b6 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdh_linux.c @@ -0,0 +1,718 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh_linux.c,v 1.42.10.10.2.14.4.2 2010/09/15 00:30:11 Exp $ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include <typedefs.h> +#include <linuxver.h> + +#include <linux/pci.h> +#include <linux/completion.h> + +#include <osl.h> +#include <pcicfg.h> +#include <bcmdefs.h> +#include <bcmdevs.h> + +#if defined(OOB_INTR_ONLY) +#include <linux/irq.h> +extern void dhdsdio_isr(void * args); +#include <bcmutils.h> +#include <dngl_stats.h> +#include <dhd.h> +#endif /* defined(OOB_INTR_ONLY) */ +#if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270) +#if !defined(BCMPLATFORM_BUS) +#define BCMPLATFORM_BUS +#endif /* !defined(BCMPLATFORM_BUS) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) +#include <linux/platform_device.h> +#endif /* KERNEL_VERSION(2, 6, 19) */ +#endif /* CONFIG_MACH_SANDGATE2G || CONFIG_MACH_LOGICPD_PXA270 */ + +/** + * SDIO Host Controller info + */ +typedef struct bcmsdh_hc bcmsdh_hc_t; + +struct bcmsdh_hc { + bcmsdh_hc_t *next; +#ifdef BCMPLATFORM_BUS + struct device *dev; /* platform device handle */ +#else + struct pci_dev *dev; /* pci device handle */ +#endif /* BCMPLATFORM_BUS */ + osl_t *osh; + void *regs; /* SDIO Host Controller address */ + bcmsdh_info_t *sdh; /* SDIO Host Controller handle */ + void *ch; + unsigned int oob_irq; + unsigned long oob_flags; /* OOB Host specifiction as edge and etc */ + bool oob_irq_registered; +#if defined(OOB_INTR_ONLY) + spinlock_t irq_lock; +#endif +}; +static bcmsdh_hc_t *sdhcinfo = NULL; + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL}; + +/* debugging macros */ +#define SDLX_MSG(x) + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. 
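+ * Returns TRUE for the standard hosts checked below (SiImage/Arasan,
+ * Broadcom 27XX and FPGA, TI PCIxx21, Ricoh R5C822, JMicron) when
+ * BCMSDIOH_STD is configured, for the Broadcom PciSpiHost when
+ * BCMSDIOH_SPI is configured, and FALSE otherwise.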
+ */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. */ + if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) { + printf("Found PCI SPI Host Controller\n"); + return (TRUE); + } + +#endif /* BCMSDIOH_SPI */ + + return (FALSE); +} + +#if defined(BCMPLATFORM_BUS) +#if defined(BCMLXSDMMC) +/* forward declarations */ +int bcmsdh_probe(struct device *dev); +int bcmsdh_remove(struct device *dev); + +EXPORT_SYMBOL(bcmsdh_probe); +EXPORT_SYMBOL(bcmsdh_remove); + +#else +/* forward declarations */ +static int __devinit bcmsdh_probe(struct device *dev); +static int __devexit bcmsdh_remove(struct device *dev); +#endif /* BCMLXSDMMC */ + +#ifndef BCMLXSDMMC +static struct device_driver bcmsdh_driver = { + .name = "pxa2xx-mci", + .bus = &platform_bus_type, + .probe = bcmsdh_probe, + .remove = bcmsdh_remove, + .suspend = NULL, + .resume = NULL, + }; +#endif /* BCMLXSDMMC */ + +#ifndef BCMLXSDMMC +static +#endif /* BCMLXSDMMC */ +int bcmsdh_probe(struct device *dev) +{ + osl_t *osh = NULL; + bcmsdh_hc_t *sdhc = NULL; + ulong regs = 0; + bcmsdh_info_t *sdh = NULL; +#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) + struct platform_device *pdev; + struct resource *r; +#endif /* BCMLXSDMMC */ + int irq = 0; + uint32 vendevid; + unsigned long irq_flags = 0; + +#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) + pdev = to_platform_device(dev); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!r || irq == NO_IRQ) + return -ENXIO; +#endif /* BCMLXSDMMC */ + +#if defined(OOB_INTR_ONLY) +#ifdef HW_OOB + irq_flags = \ + IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else + irq_flags = IRQF_TRIGGER_FALLING; +#endif /* HW_OOB */ + irq = dhd_customer_oob_irq_map(&irq_flags); + if (irq < 0) { + SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__)); + return 1; + } +#endif /* defined(OOB_INTR_ONLY) */ + /* allocate SDIO Host Controller state info */ + if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) { + SDLX_MSG(("%s: out of memory, allocated %d bytes\n", + __FUNCTION__, + MALLOCED(osh))); + goto err; + } + bzero(sdhc, sizeof(bcmsdh_hc_t)); + sdhc->osh = osh; + + sdhc->dev = (void *)dev; + +#ifdef BCMLXSDMMC + if (!(sdh = bcmsdh_attach(osh, (void *)0, + (void **)®s, irq))) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } +#else + if (!(sdh = bcmsdh_attach(osh, (void *)r->start, + (void **)®s, irq))) { + SDLX_MSG(("%s: 
bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } +#endif /* BCMLXSDMMC */ + sdhc->sdh = sdh; + sdhc->oob_irq = irq; + sdhc->oob_flags = irq_flags; + sdhc->oob_irq_registered = FALSE; /* to make sure.. */ +#if defined(OOB_INTR_ONLY) + spin_lock_init(&sdhc->irq_lock); +#endif + + /* chain SDIO Host Controller info together */ + sdhc->next = sdhcinfo; + sdhcinfo = sdhc; + /* Read the vendor/device ID from the CIS */ + vendevid = bcmsdh_query_device(sdh); + + /* try to attach to the target device */ + if (!(sdhc->ch = drvinfo.attach((vendevid >> 16), + (vendevid & 0xFFFF), 0, 0, 0, 0, + (void *)regs, NULL, sdh))) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + + return 0; + + /* error handling */ +err: + if (sdhc) { + if (sdhc->sdh) + bcmsdh_detach(sdhc->osh, sdhc->sdh); + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + } + if (osh) + osl_detach(osh); + return -ENODEV; +} + +#ifndef BCMLXSDMMC +static +#endif /* BCMLXSDMMC */ +int bcmsdh_remove(struct device *dev) +{ + bcmsdh_hc_t *sdhc, *prev; + osl_t *osh; + + sdhc = sdhcinfo; + drvinfo.detach(sdhc->ch); + bcmsdh_detach(sdhc->osh, sdhc->sdh); + /* find the SDIO Host Controller state for this pdev and take it out from the list */ + for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) { + if (sdhc->dev == (void *)dev) { + if (prev) + prev->next = sdhc->next; + else + sdhcinfo = NULL; + break; + } + prev = sdhc; + } + if (!sdhc) { + SDLX_MSG(("%s: failed\n", __FUNCTION__)); + return 0; + } + + + /* release SDIO Host Controller info */ + osh = sdhc->osh; + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + osl_detach(osh); + +#if !defined(BCMLXSDMMC) + dev_set_drvdata(dev, NULL); +#endif /* !defined(BCMLXSDMMC) */ + + return 0; +} + +#else /* BCMPLATFORM_BUS */ + +#if !defined(BCMLXSDMMC) +/* forward declarations for PCI probe and remove functions. */ +static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev); + +/** + * pci id table + */ +static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = { + { vendor: PCI_ANY_ID, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: 0, + class_mask: 0, + driver_data: 0, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid); + +/** + * SDIO Host Controller pci driver info + */ +static struct pci_driver bcmsdh_pci_driver = { + node: {}, + name: "bcmsdh", + id_table: bcmsdh_pci_devid, + probe: bcmsdh_pci_probe, + remove: bcmsdh_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif + suspend: NULL, + resume: NULL, +}; + + +extern uint sd_pci_slot; /* Force detection to a particular PCI */ + /* slot only . Allows for having multiple */ + /* WL devices at once in a PC */ + /* Only one instance of dhd will be */ + /* usable at a time */ + /* Upper word is bus number, */ + /* lower word is slot number */ + /* Default value of 0xFFFFffff turns this */ + /* off */ +module_param(sd_pci_slot, uint, 0); + + +/** + * Detect supported SDIO Host Controller and attach if found. + * + * Determine if the device described by pdev is a supported SDIO Host + * Controller. If so, attach to it and attach to the target device. 
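+ * When the sd_pci_slot module parameter is set (anything other than the
+ * default 0xFFFFffff), only the matching PCI bus/slot is accepted.  TI
+ * PCIxx21 FlashMedia parts are first told to disable their own SD/MMC
+ * function (the MMC_SD_DIS bit at PCI config offset 0x4c) so the standard
+ * SD host controller can take over the slot.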
+ */ +static int __devinit +bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + osl_t *osh = NULL; + bcmsdh_hc_t *sdhc = NULL; + ulong regs; + bcmsdh_info_t *sdh = NULL; + int rc; + + if (sd_pci_slot != 0xFFFFffff) { + if (pdev->bus->number != (sd_pci_slot>>16) || + PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) { + SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n", + __FUNCTION__, + bcmsdh_chipmatch(pdev->vendor, pdev->device) ? + "Found compatible SDIOHC" : + "Probing unknown device", + pdev->bus->number, PCI_SLOT(pdev->devfn), + pdev->vendor, pdev->device)); + return -ENODEV; + } + SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n", + __FUNCTION__, + bcmsdh_chipmatch(pdev->vendor, pdev->device) ? + "Using compatible SDIOHC" : + "WARNING, forced use of unkown device", + pdev->bus->number, PCI_SLOT(pdev->devfn), + pdev->vendor, pdev->device)); + } + + if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) || + (pdev->device == PCIXX21_FLASHMEDIA0_ID))) { + uint32 config_reg; + + SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__)); + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + + config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4); + + /* + * Set MMC_SD_DIS bit in FlashMedia Controller. + * Disbling the SD/MMC Controller in the FlashMedia Controller + * allows the Standard SD Host Controller to take over control + * of the SD Slot. + */ + config_reg |= 0x02; + OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg); + osl_detach(osh); + } + /* match this pci device with what we support */ + /* we can't solely rely on this to believe it is our SDIO Host Controller! */ + if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) { + return -ENODEV; + } + + /* this is a pci device we might support */ + SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n", + __FUNCTION__, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), pdev->irq)); + + /* use bcmsdh_query_device() to get the vendor ID of the target device so + * it will eventually appear in the Broadcom string on the console + */ + + /* allocate SDIO Host Controller state info */ + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) { + SDLX_MSG(("%s: out of memory, allocated %d bytes\n", + __FUNCTION__, + MALLOCED(osh))); + goto err; + } + bzero(sdhc, sizeof(bcmsdh_hc_t)); + sdhc->osh = osh; + + sdhc->dev = pdev; + + /* map to address where host can access */ + pci_set_master(pdev); + rc = pci_enable_device(pdev); + if (rc) { + SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__)); + goto err; + } + if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0), + (void **)®s, pdev->irq))) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } + + sdhc->sdh = sdh; + + /* try to attach to the target device */ + if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */ + bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0, + (void *)regs, NULL, sdh))) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + + /* chain SDIO Host Controller info together */ + sdhc->next = sdhcinfo; + sdhcinfo = sdhc; + + return 0; + + /* error handling */ +err: + if (sdhc->sdh) + bcmsdh_detach(sdhc->osh, sdhc->sdh); + if (sdhc) + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + if (osh) + 
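+		/* Release the OSL handle last.  Note this cleanup tests sdhc->sdh
+		 * before checking sdhc itself, so it assumes the failure did not
+		 * come from the sdhc MALLOC (the platform-bus probe above orders
+		 * these checks the other way around). */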
osl_detach(osh); + return -ENODEV; +} + + +/** + * Detach from target devices and SDIO Host Controller + */ +static void __devexit +bcmsdh_pci_remove(struct pci_dev *pdev) +{ + bcmsdh_hc_t *sdhc, *prev; + osl_t *osh; + + /* find the SDIO Host Controller state for this pdev and take it out from the list */ + for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) { + if (sdhc->dev == pdev) { + if (prev) + prev->next = sdhc->next; + else + sdhcinfo = NULL; + break; + } + prev = sdhc; + } + if (!sdhc) + return; + + drvinfo.detach(sdhc->ch); + + bcmsdh_detach(sdhc->osh, sdhc->sdh); + + /* release SDIO Host Controller info */ + osh = sdhc->osh; + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + osl_detach(osh); +} +#endif /* BCMLXSDMMC */ +#endif /* BCMPLATFORM_BUS */ + +extern int sdio_function_init(void); + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + +#if defined(BCMPLATFORM_BUS) +#if defined(BCMLXSDMMC) + SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n")); + error = sdio_function_init(); +#else + SDLX_MSG(("Intel PXA270 SDIO Driver\n")); + error = driver_register(&bcmsdh_driver); +#endif /* defined(BCMLXSDMMC) */ + return error; +#endif /* defined(BCMPLATFORM_BUS) */ + +#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (!(error = pci_module_init(&bcmsdh_pci_driver))) + return 0; +#else + if (!(error = pci_register_driver(&bcmsdh_pci_driver))) + return 0; +#endif + + SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); +#endif /* BCMPLATFORM_BUS */ + + return error; +} + +extern void sdio_function_cleanup(void); + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next) +#endif + +#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC) + driver_unregister(&bcmsdh_driver); +#endif +#if defined(BCMLXSDMMC) + sdio_function_cleanup(); +#endif /* BCMLXSDMMC */ +#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC) + pci_unregister_driver(&bcmsdh_pci_driver); +#endif /* BCMPLATFORM_BUS */ +} + + + +#if defined(OOB_INTR_ONLY) +void bcmsdh_oob_intr_set(bool enable) +{ + static bool curstate = 1; + unsigned long flags; + + spin_lock_irqsave(&sdhcinfo->irq_lock, flags); + if (curstate != enable) { + if (enable) + enable_irq(sdhcinfo->oob_irq); + else + disable_irq_nosync(sdhcinfo->oob_irq); + curstate = enable; + } + spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + dhd_pub_t *dhdp; + + dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev); + + bcmsdh_oob_intr_set(0); + + if (dhdp == NULL) { + SDLX_MSG(("Out of band GPIO interrupt fired way too early\n")); + return IRQ_HANDLED; + } + + dhdsdio_isr((void *)dhdp->bus); + + return IRQ_HANDLED; +} + +int bcmsdh_register_oob_intr(void * dhdp) +{ + int error = 0; + + SDLX_MSG(("%s Enter\n", __FUNCTION__)); + + dev_set_drvdata(sdhcinfo->dev, dhdp); + + if (!sdhcinfo->oob_irq_registered) { + SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__, \ + (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags)); + /* Refer to customer Host IRQ docs about proper irqflags definition */ + error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags, + "bcmsdh_sdmmc", NULL); + if (error) + return -ENODEV; + + set_irq_wake(sdhcinfo->oob_irq, 1); + sdhcinfo->oob_irq_registered = TRUE; + } + + return 0; +} + +void bcmsdh_unregister_oob_intr(void) +{ + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + + set_irq_wake(sdhcinfo->oob_irq, 0); + 
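+	/* Tear-down mirrors bcmsdh_register_oob_intr(): the wake capability
+	 * granted there is revoked above, then the line is masked and freed
+	 * below and oob_irq_registered is cleared so a later registration
+	 * starts clean. */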
disable_irq(sdhcinfo->oob_irq); /* just in case.. */ + free_irq(sdhcinfo->oob_irq, NULL); + sdhcinfo->oob_irq_registered = FALSE; +} +#endif /* defined(OOB_INTR_ONLY) */ +/* Module parameters specific to each host-controller driver */ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); +EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c new file mode 100644 index 000000000000..bda919390587 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c @@ -0,0 +1,1305 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdh_sdmmc.c,v 1.1.2.5.6.30.4.1 2010/09/02 23:12:21 Exp $ + */ +#include <typedefs.h> + +#include <bcmdevs.h> +#include <bcmendian.h> +#include <bcmutils.h> +#include <osl.h> +#include <sdio.h> /* SDIO Device and Protocol Specs */ +#include <sdioh.h> /* SDIO Host Controller Specification */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* ioctl/iovars */ + +#include <linux/mmc/core.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> + +#include <dngl_stats.h> +#include <dhd.h> + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include <linux/suspend.h> +extern volatile bool dhd_mmc_suspend; +#endif +#include "bcmsdh_sdmmc.h" + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +extern int sdio_reset_comm(struct mmc_card *card); + +extern PBCMSDH_SDMMC_INSTANCE gInstance; + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 512; /* Default blocksize */ + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = 0x01; +uint sd_use_dma = TRUE; +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(gInstance->func[1]); + err_ret = sdio_enable_func(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (gInstance == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + 
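+		/* Nothing has been claimed yet, so simply fail; on success the code
+		 * below zeroes the state, sizes the F1/F2 blocks (64 bytes and
+		 * sd_f2_blocksize respectively) and enables the functions. */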
return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (sdioh_sdmmc_osinit(sd) != 0) { + sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + + gInstance->sd = sd; + + /* Claim host controller */ + sdio_claim_host(gInstance->func[1]); + + sd->client_block_size[1] = 64; + err_ret = sdio_set_block_size(gInstance->func[1], 64); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); + } + + /* Release host controller F1 */ + sdio_release_host(gInstance->func[1]); + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n", + sd_f2_blocksize)); + } + + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; +} + + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + + /* Disable Function 2 */ + sdio_claim_host(gInstance->func[2]); + sdio_disable_func(gInstance->func[2]); + sdio_release_host(gInstance->func[2]); + + /* Disable Function 1 */ + sdio_claim_host(gInstance->func[1]); + sdio_disable_func(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + + /* deregister irq */ + sdioh_sdmmc_osfree(sd); + + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +extern SDIOH_API_RC +sdioh_enable_func_intr(void) +{ + uint8 reg; + int err; + + if (gInstance->func[0]) { + sdio_claim_host(gInstance->func[0]); + + reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(gInstance->func[0]); + return SDIOH_API_RC_FAIL; + } + + /* Enable F1 and F2 interrupts, set master enable */ + reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN); + + sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(gInstance->func[0]); + + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_disable_func_intr(void) +{ + uint8 reg; + int err; + + if (gInstance->func[0]) { + sdio_claim_host(gInstance->func[0]); + reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(gInstance->func[0]); + return SDIOH_API_RC_FAIL; + } + + reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); + /* Disable master interrupt with the last function interrupt */ + if (!(reg & 0xFE)) + reg = 0; + sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); + + sdio_release_host(gInstance->func[0]); + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + } + return SDIOH_API_RC_SUCCESS; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC 
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + if (fn == NULL) { + sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + + /* register and unmask irq */ + if (gInstance->func[2]) { + sdio_claim_host(gInstance->func[2]); + sdio_claim_irq(gInstance->func[2], IRQHandlerF2); + sdio_release_host(gInstance->func[2]); + } + + if (gInstance->func[1]) { + sdio_claim_host(gInstance->func[1]); + sdio_claim_irq(gInstance->func[1], IRQHandler); + sdio_release_host(gInstance->func[1]); + } +#elif defined(HW_OOB) + sdioh_enable_func_intr(); +#endif /* defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + +#if !defined(OOB_INTR_ONLY) + if (gInstance->func[1]) { + /* register and unmask irq */ + sdio_claim_host(gInstance->func[1]); + sdio_release_irq(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + } + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + sdio_release_irq(gInstance->func[2]); + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#elif defined(HW_OOB) + sdioh_disable_func_intr(); +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return (0); +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_RXCHAIN +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set 
|| (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = FALSE; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, 
val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ + else if (sd_ptr->offset & 2) + int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ + else + int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = 0; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +SDIOH_API_RC +sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) +{ + SDIOH_API_RC status; + uint8 data; + + if (enable) + data = 3; /* enable hw oob interrupt */ + else + data = 4; /* disable hw oob interrupt */ + + data |= 4; /* Active HIGH */ + + status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data); + return status; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +static int +sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + sd_err(("%s: no 
func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cis = (uint8)(foo & 0xff); + cis++; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int err_ret; + + sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); + + DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + if(rw) { /* CMD52 Write */ + if (func == 0) { + /* Can only directly write to some F0 registers. Handle F2 enable + * as a special case. + */ + if (regaddr == SDIOD_CCCR_IOEN) { + if (gInstance->func[2]) { + sdio_claim_host(gInstance->func[2]); + if (*byte & SDIO_FUNC_ENABLE_2) { + /* Enable Function 2 */ + err_ret = sdio_enable_func(gInstance->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F2 failed:%d", + err_ret)); + } + } else { + /* Disable Function 2 */ + err_ret = sdio_disable_func(gInstance->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d", + err_ret)); + } + } + sdio_release_host(gInstance->func[2]); + } + } +#if defined(MMC_SDIO_ABORT) + /* to allow abort command through F1 */ + else if (regaddr == SDIOD_CCCR_IOABORT) { + sdio_claim_host(gInstance->func[func]); + /* + * this sdio_f0_writeb() can be replaced with another api + * depending upon MMC driver change. + * As of this time, this is temporaray one + */ + sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } +#endif /* MMC_SDIO_ABORT */ + else if (regaddr < 0xF0) { + sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); + } else { + /* Claim host controller, perform F0 write, and release */ + sdio_claim_host(gInstance->func[func]); + sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } + } else { + /* Claim host controller, perform Fn write, and release */ + sdio_claim_host(gInstance->func[func]); + sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } + } else { /* CMD52 Read */ + /* Claim host controller, perform Fn read, and release */ + sdio_claim_host(gInstance->func[func]); + + if (func == 0) { + *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret); + } else { + *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret); + } + + sdio_release_host(gInstance->func[func]); + } + + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", + rw ? "Write" : "Read", func, regaddr, *byte, err_ret)); + } + + return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int err_ret = SDIOH_API_RC_FAIL; + + if (func == 0) { + sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", + __FUNCTION__, cmd_type, rw, func, addr, nbytes)); + + DHD_PM_RESUME_WAIT(sdioh_request_word_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Claim host controller */ + sdio_claim_host(gInstance->func[func]); + + if(rw) { /* CMD52 Write */ + if (nbytes == 4) { + sdio_writel(gInstance->func[func], *word, addr, &err_ret); + } else if (nbytes == 2) { + sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret); + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } else { /* CMD52 Read */ + if (nbytes == 4) { + *word = sdio_readl(gInstance->func[func], addr, &err_ret); + } else if (nbytes == 2) { + *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF; + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } + + /* Release host controller */ + sdio_release_host(gInstance->func[func]); + + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x", + rw ? "Write" : "Read", err_ret)); + } + + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +static SDIOH_API_RC +sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, void *pkt) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + uint32 SGCount = 0; + int err_ret = 0; + + void *pnext; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + + ASSERT(pkt); + DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + /* Claim host controller */ + sdio_claim_host(gInstance->func[func]); + for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { + uint pkt_len = PKTLEN(sd->osh, pnext); + pkt_len += 3; + pkt_len &= 0xFFFFFFFC; + +#ifdef CONFIG_MMC_MSM7X00A + if ((pkt_len % 64) == 32) { + sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__)); + pkt_len += 32; + } +#endif /* CONFIG_MMC_MSM7X00A */ + /* Make sure the packet is aligned properly. If it isn't, then this + * is the fault of sdioh_request_buffer() which is supposed to give + * us something we can work with. + */ + ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0); + + if ((write) && (!fifo)) { + err_ret = sdio_memcpy_toio(gInstance->func[func], addr, + ((uint8*)PKTDATA(sd->osh, pnext)), + pkt_len); + } else if (write) { + err_ret = sdio_memcpy_toio(gInstance->func[func], addr, + ((uint8*)PKTDATA(sd->osh, pnext)), + pkt_len); + } else if (fifo) { + err_ret = sdio_readsb(gInstance->func[func], + ((uint8*)PKTDATA(sd->osh, pnext)), + addr, + pkt_len); + } else { + err_ret = sdio_memcpy_fromio(gInstance->func[func], + ((uint8*)PKTDATA(sd->osh, pnext)), + addr, + pkt_len); + } + + if (err_ret) { + sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", + __FUNCTION__, + (write) ? "TX" : "RX", + pnext, SGCount, addr, pkt_len, err_ret)); + } else { + sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n", + __FUNCTION__, + (write) ? 
"TX" : "RX", + pnext, SGCount, addr, pkt_len)); + } + + if (!fifo) { + addr += pkt_len; + } + SGCount ++; + + } + + /* Release host controller */ + sdio_release_host(gInstance->func[func]); + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + + +/* + * This function takes a buffer or packet, and fixes everything up so that in the + * end, a DMA-able packet is created. + * + * A buffer does not have an associated packet pointer, and may or may not be aligned. + * A packet may consist of a single packet, or a packet chain. If it is a packet chain, + * then all the packets in the chain must be properly aligned. If the packet data is not + * aligned, then there may only be one packet, and in this case, it is copied to a new + * aligned packet. + * + */ +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + SDIOH_API_RC Status; + void *mypkt = NULL; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + + DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Case 1: we don't have a packet. */ + if (pkt == NULL) { + sd_data(("%s: Creating new %s Packet, len=%d\n", + __FUNCTION__, write ? "TX" : "RX", buflen_u)); +#ifdef DHD_USE_STATIC_BUF + if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) { +#else + if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) { +#endif /* DHD_USE_STATIC_BUF */ + sd_err(("%s: PKTGET failed: len %d\n", + __FUNCTION__, buflen_u)); + return SDIOH_API_RC_FAIL; + } + + /* For a write, copy the buffer data into the packet. */ + if (write) { + bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u); + } + + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. */ + if (!write) { + bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u); + } +#ifdef DHD_USE_STATIC_BUF + PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE); +#else + PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE); +#endif /* DHD_USE_STATIC_BUF */ + } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) { + /* Case 2: We have a packet, but it is unaligned. */ + + /* In this case, we cannot have a chain. */ + ASSERT(PKTNEXT(sd->osh, pkt) == NULL); + + sd_data(("%s: Creating aligned %s Packet, len=%d\n", + __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt))); +#ifdef DHD_USE_STATIC_BUF + if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { +#else + if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { +#endif /* DHD_USE_STATIC_BUF */ + sd_err(("%s: PKTGET failed: len %d\n", + __FUNCTION__, PKTLEN(sd->osh, pkt))); + return SDIOH_API_RC_FAIL; + } + + /* For a write, copy the buffer data into the packet. */ + if (write) { + bcopy(PKTDATA(sd->osh, pkt), + PKTDATA(sd->osh, mypkt), + PKTLEN(sd->osh, pkt)); + } + + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. */ + if (!write) { + bcopy(PKTDATA(sd->osh, mypkt), + PKTDATA(sd->osh, pkt), + PKTLEN(sd->osh, mypkt)); + } +#ifdef DHD_USE_STATIC_BUF + PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE); +#else + PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE); +#endif /* DHD_USE_STATIC_BUF */ + } else { /* case 3: We have a packet and it is aligned. 
*/ + sd_data(("%s: Aligned %s Packet, direct DMA\n", + __FUNCTION__, write ? "Tx" : "Rx")); + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt); + } + + return (Status); +} + +/* this function performs "abort" for both of host & device */ +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ +#if defined(MMC_SDIO_ABORT) + char t_func = (char) func; +#endif /* defined(MMC_SDIO_ABORT) */ + sd_trace(("%s: Enter\n", __FUNCTION__)); + +#if defined(MMC_SDIO_ABORT) + /* issue abort cmd52 command through F1 */ + sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func); +#endif /* defined(MMC_SDIO_ABORT) */ + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Reset and re-initialize the device */ +int sdioh_sdio_reset(sdioh_info_t *si) +{ + sd_trace(("%s: Enter\n", __FUNCTION__)); + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Disable device interrupt */ +void +sdioh_sdmmc_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask &= ~CLIENT_INTR; +} + +/* Enable device interrupt */ +void +sdioh_sdmmc_devintr_on(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask |= CLIENT_INTR; +} + +/* Read client card reg */ +int +sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp = 0; + + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + *data = temp; + *data &= 0xff; + sd_data(("%s: byte read data=0x%02x\n", + __FUNCTION__, *data)); + } else { + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize); + if (regsize == 2) + *data &= 0xffff; + + sd_data(("%s: word read data=0x%08x\n", + __FUNCTION__, *data)); + } + + return SUCCESS; +} + +#if !defined(OOB_INTR_ONLY) +/* bcmsdh_sdmmc interrupt handler */ +static void IRQHandler(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n")); + sd = gInstance->sd; + + ASSERT(sd != NULL); + sdio_release_host(gInstance->func[0]); + + if (sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("bcmsdh_sdmmc: ***IRQHandler\n")); + + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + + sdio_claim_host(gInstance->func[0]); +} + +/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */ +static void IRQHandlerF2(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n")); + + sd = gInstance->sd; + + ASSERT(sd != NULL); +} +#endif /* !defined(OOB_INTR_ONLY) */ + +#ifdef NOTUSED +/* Write client card reg */ +static int +sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp; + + temp = data & 0xff; + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + sd_data(("%s: byte write data=0x%02x\n", + __FUNCTION__, data)); + } else { + if (regsize == 2) + data &= 0xffff; + + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize); + + sd_data(("%s: word write data=0x%08x\n", + __FUNCTION__, data)); + } + + return SUCCESS; +} +#endif /* NOTUSED */ + +int +sdioh_start(sdioh_info_t *si, int stage) +{ + int ret; + sdioh_info_t *sd = gInstance->sd; + + /* Need to do this stages as we can't enable 
the interrupt till + downloading of the firmware is complete, other wise polling + sdio access will come in way + */ + if (gInstance->func[0]) { + if (stage == 0) { + /* Since the power to the chip is killed, we will have + re enumerate the device again. Set the block size + and enable the fucntion 1 for in preparation for + downloading the code + */ + /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux + 2.6.27. The implementation prior to that is buggy, and needs broadcom's + patch for it + */ + if ((ret = sdio_reset_comm(gInstance->func[0]->card))) + sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); + else { + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + + /* Claim host controller */ + sdio_claim_host(gInstance->func[1]); + + sd->client_block_size[1] = 64; + if (sdio_set_block_size(gInstance->func[1], 64)) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); + } + + /* Release host controller F1 */ + sdio_release_host(gInstance->func[1]); + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + if (sdio_set_block_size(gInstance->func[2], + sd_f2_blocksize)) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 " + "blocksize to %d\n", sd_f2_blocksize)); + } + + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + } + } else { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(gInstance->func[0]); + sdio_claim_irq(gInstance->func[2], IRQHandlerF2); + sdio_claim_irq(gInstance->func[1], IRQHandler); + sdio_release_host(gInstance->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_enable_func_intr(); +#endif + bcmsdh_oob_intr_set(TRUE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + + return (0); +} + +int +sdioh_stop(sdioh_info_t *si) +{ + /* MSM7201A Android sdio stack has bug with interrupt + So internaly within SDIO stack they are polling + which cause issue when device is turned off. 
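+	   (In other words, the MMC core keeps polling the card for interrupts
+	   even after the WLAN chip has been powered off, so the in-band IRQ
+	   hooks, or the out-of-band interrupt, are released below.)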
So + unregister interrupt with SDIO stack to stop the + polling + */ + if (gInstance->func[0]) { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(gInstance->func[0]); + sdio_release_irq(gInstance->func[1]); + sdio_release_irq(gInstance->func[2]); + sdio_release_host(gInstance->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_disable_func_intr(); +#endif + bcmsdh_oob_intr_set(FALSE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + return (0); +} diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c new file mode 100644 index 000000000000..8992a4267f9f --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c @@ -0,0 +1,273 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdh_sdmmc_linux.c,v 1.1.2.5.6.17 2010/08/13 00:36:19 Exp $ + */ + +#include <typedefs.h> +#include <bcmutils.h> +#include <sdio.h> /* SDIO Specs */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* to get msglevel bit values */ + +#include <linux/sched.h> /* request_irq() */ + +#include <linux/mmc/core.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) +#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */ +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325) +#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4329) +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4319) +#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */ + +#include <bcmsdh_sdmmc.h> + +#include <dhd_dbg.h> + +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +PBCMSDH_SDMMC_INSTANCE gInstance; + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern int bcmsdh_probe(struct device *dev); +extern int bcmsdh_remove(struct device *dev); +struct device sdmmc_dev; + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + static struct sdio_func sdio_func_0; + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_trace(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_trace(("sdio_device: 0x%04x\n", func->device)); + sd_trace(("Function#: 0x%04x\n", func->num)); + + if (func->num == 1) { + sdio_func_0.num = 0; + sdio_func_0.card = func->card; + gInstance->func[0] = &sdio_func_0; + if(func->device == 0x4) { /* 4318 */ + gInstance->func[2] = NULL; + sd_trace(("NIC found, calling bcmsdh_probe...\n")); + ret = bcmsdh_probe(&sdmmc_dev); + } + } + + gInstance->func[func->num] = func; + + if (func->num == 2) { + sd_trace(("F2 found, calling bcmsdh_probe...\n")); + ret = bcmsdh_probe(&sdmmc_dev); + } + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if (func->num == 2) { + sd_trace(("F2 found, calling bcmsdh_remove...\n")); + bcmsdh_remove(&sdmmc_dev); + } +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, 
SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +static struct sdio_driver bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, + }; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + + +int +sdioh_sdmmc_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + return BCME_OK; +} + +void +sdioh_sdmmc_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + +#if !defined(OOB_INTR_ONLY) + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#endif /* !defined(OOB_INTR_ONLY) */ + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable) { + sdioh_sdmmc_devintr_on(sd); + } else { + sdioh_sdmmc_devintr_off(sd); + } + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int sdio_function_init(void) +{ + int error = 0; + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + + gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL); + if (!gInstance) + return -ENOMEM; + + bzero(&sdmmc_dev, sizeof(sdmmc_dev)); + error = sdio_register_driver(&bcmsdh_sdmmc_driver); + + + return error; +} + +/* + * module cleanup +*/ +extern int bcmsdh_remove(struct device *dev); +void sdio_function_cleanup(void) +{ + sd_trace(("%s Enter\n", __FUNCTION__)); + + + sdio_unregister_driver(&bcmsdh_sdmmc_driver); + + if (gInstance) + kfree(gInstance); +} diff --git a/drivers/net/wireless/bcm4329/bcmsdspi.c b/drivers/net/wireless/bcm4329/bcmsdspi.c new file mode 100644 index 000000000000..636539be5ea5 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdspi.c @@ -0,0 +1,1596 @@ +/* + * Broadcom BCMSDH to SPI Protocol Conversion Layer + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, 
with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdspi.c,v 1.14.4.2.4.4.6.5 2010/03/10 03:09:48 Exp $ + */ + +#include <typedefs.h> + +#include <bcmdevs.h> +#include <bcmendian.h> +#include <bcmutils.h> +#include <osl.h> +#include <siutils.h> +#include <sdio.h> /* SDIO Device and Protocol Specs */ +#include <sdioh.h> /* SDIO Host Controller Specification */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* ioctl/iovars */ + +#include <pcicfg.h> + + +#include <bcmsdspi.h> +#include <bcmspi.h> + +#include <proto/sdspi.h> + +#define SD_PAGE 4096 + +/* Globals */ + +uint sd_msglevel = SDH_ERROR_VAL; +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 512; /* Default blocksize */ + +uint sd_divisor = 2; /* Default 33MHz/2 = 16MHz for dongle */ +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint sd_toctl = 7; + +/* Prototypes */ +static bool sdspi_start_power(sdioh_info_t *sd); +static int sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode); +static int sdspi_card_enablefuncs(sdioh_info_t *sd); +static void sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count); +static int sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg, + uint32 *data, uint32 datalen); +static int sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int sdspi_driver_init(sdioh_info_t *sd); +static bool sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset); +static int sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int sdspi_abort(sdioh_info_t *sd, uint func); + +static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize); + +static uint8 sdspi_crc7(unsigned char* p, uint32 len); +static uint16 sdspi_crc16(unsigned char* p, uint32 len); +static int sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + + if (spi_osinit(sd) != 0) { + sd_err(("%s: 
spi_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->bar0 = (uintptr)bar0; + sd->irq = irq; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; + + /* Set defaults */ + sd->sd_blockmode = FALSE; + sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Haven't figured out how to make bytemode work with dma */ + if (!sd->sd_blockmode) + sd->sd_use_dma = 0; + + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + if (sdspi_driver_init(sd) != SUCCESS) { + if (sdspi_driver_init(sd) != SUCCESS) { + sd_err(("%s:sdspi_driver_init() failed()\n", __FUNCTION__)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + if (sd->card_init_done) + sdspi_reset(sd, 1, 1); + + sd_info(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + + *onoff = sd->client_intr_enabled; + + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_CRC +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_crc", IOV_CRC, 
0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + if (!si->sd_blockmode) + si->sd_use_dma = 0; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + spi_lock(si); + bcmerror = set_client_block_size(si, func, blksize); + spi_unlock(si); + break; + } + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("set clock failed!\n")); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case 
IOV_GVAL(IOV_CRC): + int_val = (uint32)sd_crc; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CRC): + /* Apply new setting, but don't change sd_crc until + * after the CRC-mode is selected in the device. This + * is required because the software must generate a + * correct CRC for the CMD59 in order to be able to + * turn OFF the CRC. + */ + sdspi_crc_onoff(si, int_val ? 1 : 0); + sd_crc = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!sdspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("Failed changing highspeed mode to %d.\n", sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sd_err(("IOV_HOSTREG unsupported\n")); + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + return SDIOH_API_RC_FAIL; + } + + spi_lock(sd); + *cis = 0; + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdspi_card_regread (sd, 0, offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + *cis = (uint8)(foo & 0xff); + cis++; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 
0 : 1); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte); + + sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x\n", __FUNCTION__, rw, func, regaddr)); + + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, + SDIOH_CMD_52, cmd_arg, NULL, 0)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + sdspi_cmd_getrsp(sd, &rsp5, 1); + if (rsp5 != 0x00) { + sd_err(("%s: rsp5 flags is 0x%x func=%d\n", + __FUNCTION__, rsp5, func)); + /* ASSERT(0); */ + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + + if (rw == SDIOH_READ) + *byte = sd->card_rsp_data >> 24; + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = sdspi_card_regread(sd, func, addr, nbytes, word); + else + status = sdspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks: + * Bytemode: 1 block at a time. + */ + while (buflen > 0) { + if (sd->sd_blockmode) { + /* Max xfer is Page size */ + len = MIN(SD_PAGE, buflen); + + /* Round down to a block boundry */ + if (buflen > sd->client_block_size[func]) + len = (len/sd->client_block_size[func]) * + sd->client_block_size[func]; + } else { + /* Byte mode: One block at a time */ + len = MIN(sd->client_block_size[func], buflen); + } + + if (sdspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +static int +sdspi_abort(sdioh_info_t *sd, uint func) +{ + uint8 spi_databuf[] = { 0x74, 0x80, 0x00, 0x0C, 0xFF, 0x95, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + uint8 spi_rspbuf[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + int err = 0; + + sd_err(("Sending SPI Abort to F%d\n", func)); + spi_databuf[4] = func & 0x7; + /* write to function 0, addr 6 (IOABORT) func # in 3 LSBs. 
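(Editorial note, not part of the patch: the hard-coded frame decodes as follows. Byte 0, 0x74, is the SPI start/transmission bits 01 followed by command index 52; bytes 1-4 carry the CMD52 argument 0x80000C00 | (func & 0x7), i.e. bit 31 set for a write, function 0 (the CCCR) in bits 30:28, the IOABORT register address 0x06 in bits 25:9 (0x06 << 9 = 0x0C00), and the number of the function to abort in the low data byte, which is the spi_databuf[4] patched just above. Byte 5, 0x95, is only a placeholder CRC7 and is ignored unless CRC checking was turned on with CMD59.)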
*/ + spi_sendrecv(sd, spi_databuf, spi_rspbuf, sizeof(spi_databuf)); + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint fnum) +{ + int ret; + + spi_lock(sd); + ret = sdspi_abort(sd, fnum); + spi_unlock(sd); + + return ret; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + + +/* + * Private/Static work routines + */ +static bool +sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset) +{ + if (!sd) + return TRUE; + + spi_lock(sd); + /* Reset client card */ + if (client_reset && (sd->adapter_slot != -1)) { + if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS) + sd_err(("%s: Cannot write to card reg 0x%x\n", + __FUNCTION__, SDIOD_CCCR_IOABORT)); + else + sd->card_rca = 0; + } + + /* The host reset is a NOP in the sd-spi case. */ + if (host_reset) { + sd->sd_mode = SDIOH_MODE_SPI; + } + spi_unlock(sd); + return TRUE; +} + +static int +sdspi_host_init(sdioh_info_t *sd) +{ + sdspi_reset(sd, 1, 0); + + /* Default power on mode is SD1 */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +#define CMD0_RETRIES 3 +#define CMD5_RETRIES 10 + +static int +get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp) +{ + uint32 rsp5; + int retries, status; + + /* First issue a CMD0 to get the card into SPI mode. */ + for (retries = 0; retries <= CMD0_RETRIES; retries++) { + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, + SDIOH_CMD_0, *cmd_arg, NULL, 0)) != SUCCESS) { + sd_err(("%s: No response to CMD0\n", __FUNCTION__)); + continue; + } + + sdspi_cmd_getrsp(sd, &rsp5, 1); + + if (GFIELD(rsp5, SPI_RSP_ILL_CMD)) { + printf("%s: Card already initialized (continuing)\n", __FUNCTION__); + break; + } + + if (GFIELD(rsp5, SPI_RSP_IDLE)) { + printf("%s: Card in SPI mode\n", __FUNCTION__); + break; + } + } + + if (retries > CMD0_RETRIES) { + sd_err(("%s: Too many retries for CMD0\n", __FUNCTION__)); + return ERROR; + } + + /* Get the Card's Operation Condition. */ + /* Occasionally the board takes a while to become ready. */ + for (retries = 0; retries <= CMD5_RETRIES; retries++) { + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, + SDIOH_CMD_5, *cmd_arg, NULL, 0)) != SUCCESS) { + sd_err(("%s: No response to CMD5\n", __FUNCTION__)); + continue; + } + + printf("CMD5 response data was: 0x%08x\n", sd->card_rsp_data); + + if (GFIELD(sd->card_rsp_data, RSP4_CARD_READY)) { + printf("%s: Card ready\n", __FUNCTION__); + break; + } + } + + if (retries > CMD5_RETRIES) { + sd_err(("%s: Too many retries for CMD5\n", __FUNCTION__)); + return ERROR; + } + + *cmd_rsp = sd->card_rsp_data; + + sdspi_crc_onoff(sd, sd_crc ? 1 : 0); + + return (SUCCESS); +} + +static int +sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc) +{ + uint32 args; + int status; + + args = use_crc ? 1 : 0; + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, + SDIOH_CMD_59, args, NULL, 0)) != SUCCESS) { + sd_err(("%s: No response to CMD59\n", __FUNCTION__)); + } + + sd_info(("CMD59 response data was: 0x%08x\n", sd->card_rsp_data)); + + sd_err(("SD-SPI CRC turned %s\n", use_crc ? 
"ON" : "OFF")); + return (SUCCESS); +} + +static int +sdspi_client_init(sdioh_info_t *sd) +{ + uint8 fn_ints; + + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } + + if (!sdspi_start_power(sd)) { + sd_err(("sdspi_start_power failed\n")); + return ERROR; + } + + if (sd->num_funcs == 0) { + sd_err(("%s: No IO funcs!\n", __FUNCTION__)); + return ERROR; + } + + sdspi_card_enablefuncs(sd); + + set_client_block_size(sd, 1, BLOCK_SIZE_4318); + fn_ints = INTR_CTL_FUNC1_EN; + + if (sd->num_funcs >= 2) { + set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */); + fn_ints |= INTR_CTL_FUNC2_EN; + } + + /* Enable/Disable Client interrupts */ + /* Turn on here but disable at host controller */ + if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1, + (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) { + sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__)); + return ERROR; + } + + /* Switch to High-speed clocking mode if both host and device support it */ + sdspi_set_highspeed_mode(sd, (bool)sd_hiok); + + /* After configuring for High-Speed mode, set the desired clock rate. */ + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } + + sd->card_init_done = TRUE; + + return SUCCESS; +} + +static int +sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode) +{ + uint32 regdata; + int status; + bool hsmode; + + if (HSMode == TRUE) { + + sd_err(("Attempting to enable High-Speed mode.\n")); + + if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != SUCCESS) { + return status; + } + if (regdata & SDIO_SPEED_SHS) { + sd_err(("Device supports High-Speed mode.\n")); + + regdata |= SDIO_SPEED_EHS; + + sd_err(("Writing %08x to Card at %08x\n", + regdata, SDIOD_CCCR_SPEED_CONTROL)); + if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return status; + } + + hsmode = 1; + + sd_err(("High-speed clocking mode enabled.\n")); + } + else { + sd_err(("Device does not support High-Speed Mode.\n")); + hsmode = 0; + } + } else { + if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != SUCCESS) { + return status; + } + + regdata = ~SDIO_SPEED_EHS; + + sd_err(("Writing %08x to Card at %08x\n", + regdata, SDIOD_CCCR_SPEED_CONTROL)); + if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return status; + } + + sd_err(("Low-speed clocking mode enabled.\n")); + hsmode = 0; + } + + spi_controller_highspeed_mode(sd, hsmode); + + return TRUE; +} + +bool +sdspi_start_power(sdioh_info_t *sd) +{ + uint32 cmd_arg; + uint32 cmd_rsp; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's Operation Condition. 
Occasionally the board + * takes a while to become ready + */ + + cmd_arg = 0; + if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) { + sd_err(("%s: Failed to get OCR; bailing\n", __FUNCTION__)); + return FALSE; + } + + sd_err(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT))); + sd_err(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS))); + sd_err(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY))); + sd_err(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR))); + + /* Verify that the card supports I/O mode */ + if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) { + sd_err(("%s: Card does not support I/O\n", __FUNCTION__)); + return ERROR; + } + + sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS); + + /* Examine voltage: Arasan only supports 3.3 volts, + * so look for 3.2-3.3 Volts and also 3.3-3.4 volts. + */ + + if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) { + sd_err(("This client does not support 3.3 volts!\n")); + return ERROR; + } + + + return TRUE; +} + +static int +sdspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if ((sdspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (sdspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +static int +sdspi_card_enablefuncs(sdioh_info_t *sd) +{ + int status; + uint32 regdata; + uint32 regaddr, fbraddr; + uint8 func; + uint8 *ptr; + + sd_trace(("%s\n", __FUNCTION__)); + /* Get the Card's common CIS address */ + ptr = (uint8 *) &sd->com_cis_ptr; + for (regaddr = SDIOD_CCCR_CISPTR_0; regaddr <= SDIOD_CCCR_CISPTR_2; regaddr++) { + if ((status = sdspi_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + return status; + + *ptr++ = (uint8) regdata; + } + + /* Only the lower 17-bits are valid */ + sd->com_cis_ptr &= 0x0001FFFF; + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + ptr = (uint8 *) &sd->func_cis_ptr[func]; + for (regaddr = SDIOD_FBR_CISPTR_0; regaddr <= SDIOD_FBR_CISPTR_2; regaddr++) { + if ((status = sdspi_card_regread (sd, 0, regaddr + fbraddr, 1, ®data)) + != SUCCESS) + return status; + + *ptr++ = (uint8) regdata; + } + + /* Only the lower 17-bits are valid */ + sd->func_cis_ptr[func] &= 0x0001FFFF; + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd_info(("%s: write ESCI bit\n", __FUNCTION__)); + /* Enable continuous SPI interrupt (ESCI bit) */ + sdspi_card_regwrite(sd, 0, SDIOD_CCCR_BICTRL, 1, 0x60); + + sd_info(("%s: enable f1\n", __FUNCTION__)); + /* Enable function 1 on the card */ + regdata = SDIO_FUNC_ENABLE_1; + if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS) + return status; + + sd_info(("%s: done\n", __FUNCTION__)); + return SUCCESS; +} + +/* Read client card reg */ +static int +sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0); + + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0)) + != SUCCESS) + 
return status; + + sdspi_cmd_getrsp(sd, &rsp5, 1); + + if (rsp5 != 0x00) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, rsp5, func)); + + *data = sd->card_rsp_data >> 24; + } else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ); + + sd->data_xfer_count = regsize; + + /* sdspi_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0)) + != SUCCESS) + return status; + + sdspi_cmd_getrsp(sd, &rsp5, 1); + + if (rsp5 != 0x00) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, rsp5, func)); + + *data = sd->card_rsp_data; + if (regsize == 2) { + *data &= 0xffff; + } + + sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n", + __FUNCTION__, func, regaddr, regsize, *data)); + + + } + + return SUCCESS; +} + +/* write a client register */ +static int +sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, rsp5, flags; + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff); + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0)) + != SUCCESS) + return status; + + sdspi_cmd_getrsp(sd, &rsp5, 1); + flags = GFIELD(rsp5, RSP5_FLAGS); + if (flags && (flags != 0x10)) + sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n", + __FUNCTION__, flags)); + } + else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + + sd->data_xfer_count = regsize; + sd->cmd53_wr_data = data; + + sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n", + __FUNCTION__, func, regaddr, regsize, data)); + + /* sdspi_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0)) + != SUCCESS) + return status; + + sdspi_cmd_getrsp(sd, &rsp5, 1); + + if (rsp5 != 0x00) + sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n", + __FUNCTION__, rsp5)); + + } + return SUCCESS; +} + +void +sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */) +{ + *rsp_buffer = sd->card_response; +} + +int max_errors = 0; + +#define SPI_MAX_PKT_LEN 768 +uint8 spi_databuf[SPI_MAX_PKT_LEN]; +uint8 spi_rspbuf[SPI_MAX_PKT_LEN]; + +/* datalen is used for CMD53 length only (0 for sd->data_xfer_count) */ +static int +sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg, + uint32 *data, uint32 datalen) +{ + uint32 cmd_reg; + uint32 cmd_arg = arg; + uint8 cmd_crc = 0x95; /* correct CRC for CMD0 and don't care for others. 
*/ + uint16 dat_crc; + uint8 cmd52data = 0; + uint32 i, j; + uint32 spi_datalen = 0; + uint32 spi_pre_cmd_pad = 0; + uint32 spi_max_response_pad = 128; + + cmd_reg = 0; + cmd_reg = SFIELD(cmd_reg, SPI_DIR, 1); + cmd_reg = SFIELD(cmd_reg, SPI_CMD_INDEX, cmd); + + if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) { /* Same for CMD52 and CMD53 */ + cmd_reg = SFIELD(cmd_reg, SPI_RW, 1); + } + + switch (cmd) { + case SDIOH_CMD_59: /* CRC_ON_OFF (SPI Mode Only) - Response R1 */ + cmd52data = arg & 0x1; + case SDIOH_CMD_0: /* Set Card to Idle State - No Response */ + case SDIOH_CMD_5: /* Send Operation condition - Response R4 */ + sd_trace(("%s: CMD%d\n", __FUNCTION__, cmd)); + spi_datalen = 44; + spi_pre_cmd_pad = 12; + spi_max_response_pad = 28; + break; + + case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */ + case SDIOH_CMD_7: /* Select card - Response R1 */ + case SDIOH_CMD_15: /* Set card to inactive state - Response None */ + sd_err(("%s: CMD%d is invalid for SPI Mode.\n", __FUNCTION__, cmd)); + return ERROR; + break; + + case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */ + cmd52data = GFIELD(cmd_arg, CMD52_DATA); + cmd_arg = arg; + cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD52_FUNCTION)); + cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD52_REG_ADDR)); + /* Display trace for byte write */ + if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) { + sd_trace(("%s: CMD52: Wr F:%d @0x%04x=%02x\n", + __FUNCTION__, + GFIELD(cmd_arg, CMD52_FUNCTION), + GFIELD(cmd_arg, CMD52_REG_ADDR), + cmd52data)); + } + + spi_datalen = 32; + spi_max_response_pad = 28; + + break; + case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */ + cmd_arg = arg; + cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD53_FUNCTION)); + cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD53_REG_ADDR)); + cmd_reg = SFIELD(cmd_reg, SPI_BLKMODE, 0); + cmd_reg = SFIELD(cmd_reg, SPI_OPCODE, GFIELD(cmd_arg, CMD53_OP_CODE)); + cmd_reg = SFIELD(cmd_reg, SPI_STUFF0, (sd->data_xfer_count>>8)); + cmd52data = (uint8)sd->data_xfer_count; + + /* Set upper bit in byte count if necessary, but don't set it for 512 bytes. */ + if ((sd->data_xfer_count > 255) && (sd->data_xfer_count < 512)) { + cmd_reg |= 1; + } + + if (GFIELD(cmd_reg, SPI_RW) == 1) { /* Write */ + spi_max_response_pad = 32; + spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC; + } else { /* Read */ + + spi_max_response_pad = 32; + spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC; + } + sd_trace(("%s: CMD53: %s F:%d @0x%04x len=0x%02x\n", + __FUNCTION__, + (GFIELD(cmd_reg, SPI_RW) == 1 ? "Wr" : "Rd"), + GFIELD(cmd_arg, CMD53_FUNCTION), + GFIELD(cmd_arg, CMD53_REG_ADDR), + cmd52data)); + break; + + default: + sd_err(("%s: Unknown command %d\n", __FUNCTION__, cmd)); + return ERROR; + } + + /* Set up and issue the SDIO command */ + memset(spi_databuf, SDSPI_IDLE_PAD, spi_datalen); + spi_databuf[spi_pre_cmd_pad + 0] = (cmd_reg & 0xFF000000) >> 24; + spi_databuf[spi_pre_cmd_pad + 1] = (cmd_reg & 0x00FF0000) >> 16; + spi_databuf[spi_pre_cmd_pad + 2] = (cmd_reg & 0x0000FF00) >> 8; + spi_databuf[spi_pre_cmd_pad + 3] = (cmd_reg & 0x000000FF); + spi_databuf[spi_pre_cmd_pad + 4] = cmd52data; + + /* Generate CRC7 for command, if CRC is enabled, otherwise, a + * default CRC7 of 0x95, which is correct for CMD0, is used. 
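(Editorial aside, not part of the patch: the 0x95 default is easy to check by hand. A CMD0 frame is the five bytes 0x40 0x00 0x00 0x00 0x00; CRC7 with polynomial x^7 + x^3 + 1 (CRC7_POLYNOM 0x09 below) over those bytes is 0x4A, and the final shift-and-set-end-bit step in sdspi_crc7(), i.e. (0x4A << 1) | 1, gives exactly 0x95.)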
+ */ + if (sd_crc) { + cmd_crc = sdspi_crc7(&spi_databuf[spi_pre_cmd_pad], 5); + } + spi_databuf[spi_pre_cmd_pad + 5] = cmd_crc; +#define SPI_STOP_TRAN 0xFD + + /* for CMD53 Write, put the data into the output buffer */ + if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD53_RW_FLAG) == 1)) { + if (datalen != 0) { + spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD; + spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK; + + for (i = 0; i < sd->data_xfer_count; i++) { + spi_databuf[i + 11 + spi_pre_cmd_pad] = ((uint8 *)data)[i]; + } + if (sd_crc) { + dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], i); + } else { + dat_crc = 0xAAAA; + } + spi_databuf[i + 11 + spi_pre_cmd_pad] = (dat_crc >> 8) & 0xFF; + spi_databuf[i + 12 + spi_pre_cmd_pad] = dat_crc & 0xFF; + } else if (sd->data_xfer_count == 2) { + spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD; + spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK; + spi_databuf[spi_pre_cmd_pad + 11] = sd->cmd53_wr_data & 0xFF; + spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8; + if (sd_crc) { + dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 2); + } else { + dat_crc = 0x22AA; + } + spi_databuf[spi_pre_cmd_pad + 13] = (dat_crc >> 8) & 0xFF; + spi_databuf[spi_pre_cmd_pad + 14] = (dat_crc & 0xFF); + } else if (sd->data_xfer_count == 4) { + spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD; + spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK; + spi_databuf[spi_pre_cmd_pad + 11] = sd->cmd53_wr_data & 0xFF; + spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8; + spi_databuf[spi_pre_cmd_pad + 13] = (sd->cmd53_wr_data & 0x00FF0000) >> 16; + spi_databuf[spi_pre_cmd_pad + 14] = (sd->cmd53_wr_data & 0xFF000000) >> 24; + if (sd_crc) { + dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 4); + } else { + dat_crc = 0x44AA; + } + spi_databuf[spi_pre_cmd_pad + 15] = (dat_crc >> 8) & 0xFF; + spi_databuf[spi_pre_cmd_pad + 16] = (dat_crc & 0xFF); + } else { + printf("CMD53 Write: size %d unsupported\n", sd->data_xfer_count); + } + } + + spi_sendrecv(sd, spi_databuf, spi_rspbuf, spi_datalen); + + for (i = spi_pre_cmd_pad + SDSPI_COMMAND_LEN; i < spi_max_response_pad; i++) { + if ((spi_rspbuf[i] & SDSPI_START_BIT_MASK) == 0) { + break; + } + } + + if (i == spi_max_response_pad) { + sd_err(("%s: Did not get a response for CMD%d\n", __FUNCTION__, cmd)); + return ERROR; + } + + /* Extract the response. */ + sd->card_response = spi_rspbuf[i]; + + /* for CMD53 Read, find the start of the response data... 
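(Editorial note, not part of the patch: in SD-SPI a read data block comes back as a start-block token (SDSPI_START_BLOCK, conventionally 0xFE), then the payload, then a 16-bit CRC. That is the structure the code below relies on: it scans forward for the token, extracts the payload from spi_rspbuf, and, when sd_crc is set, recomputes sdspi_crc16() over the received bytes and compares the result with the two trailing CRC bytes.)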
*/ + if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) { + for (; i < spi_max_response_pad; i++) { + if (spi_rspbuf[i] == SDSPI_START_BLOCK) { + break; + } + } + + if (i == spi_max_response_pad) { + printf("Did not get a start of data phase for CMD%d\n", cmd); + max_errors++; + sdspi_abort(sd, GFIELD(cmd_arg, CMD53_FUNCTION)); + } + sd->card_rsp_data = spi_rspbuf[i+1]; + sd->card_rsp_data |= spi_rspbuf[i+2] << 8; + sd->card_rsp_data |= spi_rspbuf[i+3] << 16; + sd->card_rsp_data |= spi_rspbuf[i+4] << 24; + + if (datalen != 0) { + i++; + for (j = 0; j < sd->data_xfer_count; j++) { + ((uint8 *)data)[j] = spi_rspbuf[i+j]; + } + if (sd_crc) { + uint16 recv_crc; + + recv_crc = spi_rspbuf[i+j] << 8 | spi_rspbuf[i+j+1]; + dat_crc = sdspi_crc16((uint8 *)data, datalen); + if (dat_crc != recv_crc) { + sd_err(("%s: Incorrect data CRC: expected 0x%04x, " + "received 0x%04x\n", + __FUNCTION__, dat_crc, recv_crc)); + } + } + } + return SUCCESS; + } + + sd->card_rsp_data = spi_rspbuf[i+4]; + sd->card_rsp_data |= spi_rspbuf[i+3] << 8; + sd->card_rsp_data |= spi_rspbuf[i+2] << 16; + sd->card_rsp_data |= spi_rspbuf[i+1] << 24; + + /* Display trace for byte read */ + if ((cmd == SDIOH_CMD_52) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) { + sd_trace(("%s: CMD52: Rd F:%d @0x%04x=%02x\n", + __FUNCTION__, + GFIELD(cmd_arg, CMD53_FUNCTION), + GFIELD(cmd_arg, CMD53_REG_ADDR), + sd->card_rsp_data >> 24)); + } + + return SUCCESS; +} + +/* + * On entry: if single-block or non-block, buffer size <= block size. + * If multi-block, buffer size is unlimited. + * Question is how to handle the left-overs in either single- or multi-block. + * I think the caller should break the buffer up so this routine will always + * use block size == buffer size to handle the end piece of the buffer + */ + +static int +sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + int num_blocks, blocksize; + bool local_blockmode, local_dma; + bool read = rw == SDIOH_READ ? 1 : 0; + + ASSERT(nbytes); + + cmd_arg = 0; + sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? 
"FIXED" : "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + if (read) sd->r_cnt++; else sd->t_cnt++; + + local_blockmode = sd->sd_blockmode; + local_dma = sd->sd_use_dma; + + /* Don't bother with block mode on small xfers */ + if (nbytes < sd->client_block_size[func]) { + sd_info(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n", + nbytes, sd->client_block_size[func])); + local_blockmode = FALSE; + local_dma = FALSE; + } + + if (local_blockmode) { + blocksize = MIN(sd->client_block_size[func], nbytes); + num_blocks = nbytes/blocksize; + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1); + } else { + num_blocks = 1; + blocksize = nbytes; + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + } + + if (fifo) + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0); + else + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr); + if (read) + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ); + else + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + + sd->data_xfer_count = nbytes; + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? "FIXED" : "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + /* sdspi_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdspi_cmd_issue(sd, local_dma, + SDIOH_CMD_53, cmd_arg, + data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write"))); + return status; + } + + sdspi_cmd_getrsp(sd, &rsp5, 1); + + if (rsp5 != 0x00) { + sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n", + __FUNCTION__, rsp5)); + return ERROR; + } + + return SUCCESS; +} + +static int +set_client_block_size(sdioh_info_t *sd, int func, int block_size) +{ + int base; + int err = 0; + + sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func)); + sd->client_block_size[func] = block_size; + + /* Set the block size in the SDIO Card register */ + base = func * SDIOD_FBR_SIZE; + err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff); + if (!err) { + err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_1, 1, + (block_size >> 8) & 0xff); + } + + /* + * Do not set the block size in the SDIO Host register; that + * is func dependent and will get done on an individual + * transaction basis. + */ + + return (err ? 
BCME_SDIO_ERROR : 0); +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return sdspi_client_init(si); +} + +#define CRC7_POLYNOM 0x09 +#define CRC7_CRCHIGHBIT 0x40 + +static uint8 sdspi_crc7(unsigned char* p, uint32 len) +{ + uint8 c, j, bit, crc = 0; + uint32 i; + + for (i = 0; i < len; i++) { + c = *p++; + for (j = 0x80; j; j >>= 1) { + bit = crc & CRC7_CRCHIGHBIT; + crc <<= 1; + if (c & j) bit ^= CRC7_CRCHIGHBIT; + if (bit) crc ^= CRC7_POLYNOM; + } + } + + /* Convert the CRC7 to an 8-bit SD CRC */ + crc = (crc << 1) | 1; + + return (crc); +} + +#define CRC16_POLYNOM 0x1021 +#define CRC16_CRCHIGHBIT 0x8000 + +static uint16 sdspi_crc16(unsigned char* p, uint32 len) +{ + uint32 i; + uint16 j, c, bit; + uint16 crc = 0; + + for (i = 0; i < len; i++) { + c = *p++; + for (j = 0x80; j; j >>= 1) { + bit = crc & CRC16_CRCHIGHBIT; + crc <<= 1; + if (c & j) bit ^= CRC16_CRCHIGHBIT; + if (bit) crc ^= CRC16_POLYNOM; + } + } + + return (crc); +} diff --git a/drivers/net/wireless/bcm4329/bcmsdspi_linux.c b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c new file mode 100644 index 000000000000..e2e0ca6abe46 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c @@ -0,0 +1,252 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdspi_linux.c,v 1.7.2.1.4.3 2008/06/30 21:09:36 Exp $ + */ + +#include <typedefs.h> +#include <pcicfg.h> +#include <bcmutils.h> + +#include <sdio.h> /* SDIO Specs */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* to get msglevel bit values */ + +#include <linux/sched.h> /* request_irq(), free_irq() */ + +#include <bcmsdspi.h> +#include <bcmspi.h> + +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ +void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ + +uint32 * +spi_reg_map(osl_t *osh, uintptr addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } + spi_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + spi_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + /* Wait for the indication, the interrupt will be masked when the ISR fires. */ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} diff --git a/drivers/net/wireless/bcm4329/bcmsdstd.c b/drivers/net/wireless/bcm4329/bcmsdstd.c new file mode 100644 index 000000000000..0ca1f8ff8a24 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdstd.c @@ -0,0 +1,3127 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdstd.c,v 1.64.4.1.4.4.2.18 2010/08/17 17:00:48 Exp $ + */ + +#include <typedefs.h> + +#include <bcmdevs.h> +#include <bcmendian.h> +#include <bcmutils.h> +#include <osl.h> +#include <siutils.h> +#include <sdio.h> /* SDIO Device and Protocol Specs */ +#include <sdioh.h> /* SDIO Host Controller Specification */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* ioctl/iovars */ +#include <pcicfg.h> + + +#define SD_PAGE_BITS 12 +#define SD_PAGE (1 << SD_PAGE_BITS) + +#include <bcmsdstd.h> + +/* Globals */ +uint sd_msglevel = SDH_ERROR_VAL; +uint sd_hiok = TRUE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + +#ifdef BCMSDYIELD +bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */ +uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */ +bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */ +#endif + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ +uint8 sd_dma_mode = DMA_MODE_SDMA; /* Default to SDMA for now */ + +uint sd_toctl = 7; + +static bool trap_errs = FALSE; + +static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" }; + +/* Prototypes */ +static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor); +static bool sdstd_start_power(sdioh_info_t *sd); +static bool sdstd_bus_width(sdioh_info_t *sd, int width); +static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode); +static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode); +static int sdstd_card_enablefuncs(sdioh_info_t *sd); +static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count); +static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg); +static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int sdstd_driver_init(sdioh_info_t *sd); +static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset); +static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int sdstd_abort(sdioh_info_t *sd, uint func); +static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg); +static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize); +static void sd_map_dma(sdioh_info_t * sd); +static void sd_unmap_dma(sdioh_info_t * sd); +static void sd_clear_adma_dscr_buf(sdioh_info_t *sd); +static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data); +static void sd_create_adma_descriptor(sdioh_info_t *sd, + uint32 index, uint32 addr_phys, + uint16 length, uint16 flags); +static void sd_dump_adma_dscr(sdioh_info_t *sd); +static void sdstd_dumpregs(sdioh_info_t *sd); + + +/* + * Private register access routines. 
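+ * These helpers do volatile loads/stores into the memory-mapped host
+ * controller window (sd->mem_space) and trace each access with sd_ctrl();
+ * they provide no locking or endian conversion of their own.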
+ */ + +/* 16 bit PCI regs */ + +extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg); +uint16 +sdstd_rreg16(sdioh_info_t *sd, uint reg) +{ + + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} + +extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data); +void +sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data) +{ + *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data; + sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data)); +} + +static void +sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val) +{ + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val)); + data |= val; + *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data; + +} +static void +sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val) +{ + + volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg); + sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val)); + data &= ~mask; + data |= (val & mask); + *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data; +} + + +/* 32 bit PCI regs */ +static uint32 +sdstd_rreg(sdioh_info_t *sd, uint reg) +{ + volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg); + sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} +static inline void +sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data) +{ + *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data; + sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data)); + +} + +/* 8 bit PCI regs */ +static inline void +sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data) +{ + *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data; + sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data)); +} +static uint8 +sdstd_rreg8(sdioh_info_t *sd, uint reg) +{ + volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg); + sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data)); + return data; +} + +/* + * Private work routines + */ + +sdioh_info_t *glob_sd; + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + glob_sd = sd; + sd->osh = osh; + if (sdstd_osinit(sd) != 0) { + sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + sd->mem_space = (volatile char *)sdstd_reg_map(osh, (uintptr)bar0, SDIOH_REG_WINSZ); + sd_init_dma(sd); + sd->irq = irq; + if (sd->mem_space == NULL) { + sd_err(("%s:ioremap() failed\n", __FUNCTION__)); + sdstd_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space)); + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; + + /* Set defaults */ + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->sd_dma_mode = sd_dma_mode; + + if (!sd->sd_blockmode) + sd->sd_dma_mode = DMA_MODE_NONE; + + if (sdstd_driver_init(sd) != SUCCESS) { + /* If host CPU was reset without resetting SD bus or + SD device, the device will still have its RCA but + driver no longer knows what it is (since driver has been restarted). + go through once to clear the RCA and a gain reassign it. 
+ */ + sd_info(("driver_init failed - Reset RCA and try again\n")); + if (sdstd_driver_init(sd) != SUCCESS) { + sd_err(("%s:driver_init() failed()\n", __FUNCTION__)); + if (sd->mem_space) { + sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + sdstd_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + } + + OSL_DMADDRWIDTH(osh, 32); + + /* Always map DMA buffers, so we can switch between DMA modes. */ + sd_map_dma(sd); + + if (sdstd_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); + sdstd_free_irq(sd->irq, sd); + if (sd->mem_space) { + sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + + sdstd_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_unmap_dma(sd); + sdstd_wreg16(sd, SD_IntrSignalEnable, 0); + sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq)); + sdstd_free_irq(sd->irq, sd); + if (sd->card_init_done) + sdstd_reset(sd, 1, 1); + if (sd->mem_space) { + sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + + sdstd_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we receive client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + uint16 intrstatus; + intrstatus = sdstd_rreg16(sd, SD_IntrStatus); + return !!(intrstatus & CLIENT_INTR); +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_YIELDCPU, + IOV_MINYIELD, + IOV_FORCERB, + IOV_CLOCK +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_UINT32, 0 }, +#ifdef BCMSDYIELD + {"sd_yieldcpu", IOV_YIELDCPU, 0, IOVT_BOOL, 0 }, + {"sd_minyield", IOV_MINYIELD, 0, IOVT_UINT32, 0 }, + {"sd_forcerb", IOV_FORCERB, 0, IOVT_BOOL, 0 }, +#endif + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, 
sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + if (!si->sd_blockmode) + si->sd_dma_mode = DMA_MODE_NONE; + break; + +#ifdef BCMSDYIELD + case IOV_GVAL(IOV_YIELDCPU): + int_val = sd_yieldcpu; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_YIELDCPU): + sd_yieldcpu = (bool)int_val; + break; + + case IOV_GVAL(IOV_MINYIELD): + int_val = sd_minyield; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MINYIELD): + sd_minyield = (bool)int_val; + break; + + case IOV_GVAL(IOV_FORCERB): + int_val = sd_forcerb; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCERB): + sd_forcerb = (bool)int_val; + break; +#endif /* BCMSDYIELD */ + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + sdstd_lock(si); + bcmerror = set_client_block_size(si, func, blksize); + sdstd_unlock(si); + break; + } + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_dma_mode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_dma_mode = (char)int_val; + sdstd_set_dma_mode(si, si->sd_dma_mode); + break; + + case 
IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!sdstd_start_clock(si, (uint16)sd_divisor)) { + sd_err(("set clock failed!\n")); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + if (sd_power == 1) { + if (sdstd_driver_init(si) != SUCCESS) { + sd_err(("set SD Slot power failed!\n")); + bcmerror = BCME_ERROR; + } else { + sd_err(("SD Slot Powered ON.\n")); + } + } else { + uint8 pwr = 0; + + pwr = SFIELD(pwr, PWR_BUS_EN, 0); + sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */ + sd_err(("SD Slot Powered OFF.\n")); + } + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + if (sd_clock == 1) { + sd_info(("SD Clock turned ON.\n")); + if (!sdstd_start_clock(si, (uint16)sd_divisor)) { + sd_err(("sdstd_start_clock failed\n")); + bcmerror = BCME_ERROR; + } + } else { + /* turn off HC clock */ + sdstd_wreg16(si, SD_ClockCntrl, + sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4)); + + sd_info(("SD Clock turned OFF.\n")); + } + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + + if (!sdstd_bus_width(si, sd_sdmode)) { + sd_err(("sdstd_bus_width failed\n")); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok); + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = sdstd_rreg8(si, sd_ptr->offset); + else if (sd_ptr->offset & 2) + int_val = sdstd_rreg16(si, sd_ptr->offset); + else + int_val = sdstd_rreg(si, sd_ptr->offset); + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value); + else if (sd_ptr->offset & 2) + sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value); + else + sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value); + + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + return SDIOH_API_RC_FAIL; + } + + sdstd_lock(sd); + *cis = 0; + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdstd_card_regread(sd, 0, offset, 1, &foo)) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + sdstd_unlock(sd); + return SDIOH_API_RC_FAIL; + } + *cis = (uint8)(foo & 0xff); + cis++; + } + sdstd_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + + sdstd_lock(sd); + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 
0 : *byte); + + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) { + sdstd_unlock(sd); + return status; + } + + sdstd_cmd_getrsp(sd, &rsp5, 1); + if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: 1: ErrorintrStatus 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus))); + } + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + + if (rw == SDIOH_READ) + *byte = GFIELD(rsp5, RSP5_DATA); + + sdstd_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + bool swap = FALSE; + + sdstd_lock(sd); + + if (rw == SDIOH_READ) { + status = sdstd_card_regread(sd, func, addr, nbytes, word); + if (swap) + *word = BCMSWAP32(*word); + } else { + if (swap) + *word = BCMSWAP32(*word); + status = sdstd_card_regwrite(sd, func, addr, nbytes, *word); + } + + sdstd_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + uint8 *localbuf = NULL, *tmpbuf = NULL; + uint tmplen = 0; + bool local_blockmode = sd->sd_blockmode; + + sdstd_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks: + * Bytemode: 1 block at a time. + * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE. + * Both: leftovers are handled last (will be sent via bytemode). 
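+ * Note: a Function-1 write whose length is 3 (mod 4) is padded below to a
+ * 4-byte multiple via a temporary buffer (e.g. 63 bytes go out as 64), and
+ * the original pointer and length are restored once the chunk is sent.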
+ */ + while (buflen > 0) { + if (local_blockmode) { + /* Max xfer is Page size */ + len = MIN(SD_PAGE, buflen); + + /* Round down to a block boundry */ + if (buflen > sd->client_block_size[func]) + len = (len/sd->client_block_size[func]) * + sd->client_block_size[func]; + if ((func == SDIO_FUNC_1) && ((len % 4) == 3) && (rw == SDIOH_WRITE)) { + tmplen = len; + sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__)); + len++; + tmpbuf = buffer; + if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) { + sd_err(("out of memory, malloced %d bytes\n", + MALLOCED(sd->osh))); + sdstd_unlock(sd); + return SDIOH_API_RC_FAIL; + } + bcopy(buffer, localbuf, len); + buffer = localbuf; + } + } else { + /* Byte mode: One block at a time */ + len = MIN(sd->client_block_size[func], buflen); + } + + if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sdstd_unlock(sd); + return SDIOH_API_RC_FAIL; + } + + if (local_blockmode) { + if ((func == SDIO_FUNC_1) && ((tmplen % 4) == 3) && (rw == SDIOH_WRITE)) { + if (localbuf) + MFREE(sd->osh, localbuf, len); + len--; + buffer = tmpbuf; + sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__)); + } + } + + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + sdstd_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +static +int sdstd_abort(sdioh_info_t *sd, uint func) +{ + int err = 0; + int retries; + + uint16 cmd_reg; + uint32 cmd_arg; + uint32 rsp5; + uint8 rflags; + + uint16 int_reg = 0; + uint16 plain_intstatus; + + /* Argument is write to F0 (CCCR) IOAbort with function number */ + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func); + + /* Command is CMD52 write */ + cmd_reg = 0; + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52); + + if (sd->sd_mode == SDIOH_MODE_SPI) { + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + } + + /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */ + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) { + if (retries == RETRIES_SMALL) + sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); + if (!--retries) { + sd_err(("%s: Command Inhibit timeout, state 0x%08x\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); + if (trap_errs) + ASSERT(0); + err = BCME_SDIO_ERROR; + goto done; + } + } + + /* Clear errors from any previous commands */ + if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) { + sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus)); + sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); + } + plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus); + if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) { + sd_err(("abort: intstatus 0x%04x\n", plain_intstatus)); + if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) { + sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n")); + } + if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) { + sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n")); + err 
= BCME_NODEVICE; + goto done; + } + } + + /* Issue the command */ + sdstd_wreg(sd, SD_Arg0, cmd_arg); + sdstd_wreg16(sd, SD_Command, cmd_reg); + + /* In interrupt mode return, expect later CMD_COMPLETE interrupt */ + if (!sd->polled_mode) + return err; + + /* Otherwise, wait for the command to complete */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && + (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) && + (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0)); + + /* If command completion fails, do a cmd reset and note the error */ + if (!retries) { + sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + + sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), + SW_RESET_CMD)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + + err = BCME_SDIO_ERROR; + } + + /* Clear Command Complete interrupt */ + int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1); + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* Check for Errors */ + if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) { + sd_err(("%s: ErrorintrStatus: 0x%x, " + "(intrstatus = 0x%x, present state 0x%x) clearing\n", + __FUNCTION__, plain_intstatus, + sdstd_rreg16(sd, SD_IntrStatus), + sdstd_rreg(sd, SD_PresentState))); + + sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); + + sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), + SW_RESET_DAT)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + + /* ABORT is dataless, only cmd errs count */ + if (plain_intstatus & ERRINT_CMD_ERRS) + err = BCME_SDIO_ERROR; + } + + /* If command failed don't bother looking at response */ + if (err) + goto done; + + /* Otherwise, check the response */ + sdstd_cmd_getrsp(sd, &rsp5, 1); + rflags = GFIELD(rsp5, RSP5_FLAGS); + + if (rflags & SD_RSP_R5_ERRBITS) { + sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags)); + + /* The CRC error flag applies to the previous command */ + if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) { + err = BCME_SDIO_ERROR; + goto done; + } + } + + if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) && + ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) { + sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags)); + err = BCME_SDIO_ERROR; + goto done; + } + + if (GFIELD(rsp5, RSP5_STUFF)) { + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + err = BCME_SDIO_ERROR; + goto done; + } + +done: + if (err == BCME_NODEVICE) + return err; + + sdstd_wreg8(sd, SD_SoftwareReset, + SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1)); + + retries = RETRIES_LARGE; + do { + rflags = sdstd_rreg8(sd, SD_SoftwareReset); + if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD)) + break; + } while (--retries); + + if (!retries) { + sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n", + __FUNCTION__, rflags)); 
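+ /* A software reset that never self-clears is treated as a fatal SDIO error */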
+ err = BCME_SDIO_ERROR; + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint fnum) +{ + int ret; + + sdstd_lock(sd); + ret = sdstd_abort(sd, fnum); + sdstd_unlock(sd); + + return ret; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +static int +sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg) +{ + uint16 regval; + uint retries; + uint function = 0; + + /* If no errors, we're done */ + if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0) + return SUCCESS; + + sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n", + __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus), + sdstd_rreg(sdioh_info, SD_PresentState))); + sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval); + + /* On command error, issue CMD reset */ + if (regval & ERRINT_CMD_ERRS) { + sd_trace(("%s: issuing CMD reset\n", __FUNCTION__)); + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + for (retries = RETRIES_LARGE; retries; retries--) + if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD))) + break; + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + } + + /* On data error, issue DAT reset */ + if (regval & ERRINT_DATA_ERRS) { + sd_trace(("%s: issuing DAT reset\n", __FUNCTION__)); + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); + for (retries = RETRIES_LARGE; retries; retries--) + if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT))) + break; + if (!retries) { + sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); + } + } + + /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */ + if (cmd == SDIOH_CMD_53) + function = GFIELD(arg, CMD53_FUNCTION); + else if (cmd == SDIOH_CMD_52) + function = GFIELD(arg, CMD52_FUNCTION); + if (function) { + sd_trace(("%s: requesting abort for function %d after cmd %d\n", + __FUNCTION__, function, cmd)); + sdstd_abort(sdioh_info, function); + } + + if (trap_errs) + ASSERT(0); + + return ERROR; +} + + + +/* + * Private/Static work routines + */ +static bool +sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset) +{ + int retries = RETRIES_LARGE; + uchar regval; + + if (!sd) + return TRUE; + + sdstd_lock(sd); + /* Reset client card */ + if (client_reset && (sd->adapter_slot != -1)) { + if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS) + sd_err(("%s: Cannot write to card reg 0x%x\n", + __FUNCTION__, SDIOD_CCCR_IOABORT)); + else + sd->card_rca = 0; + } + + /* Reset host controller */ + if (host_reset) { + regval = SFIELD(0, SW_RESET_ALL, 1); + sdstd_wreg8(sd, SD_SoftwareReset, regval); + do { + sd_trace(("%s: waiting for reset\n", __FUNCTION__)); + } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__)); + sdstd_unlock(sd); + return (FALSE); + } + + /* A reset should reset bus back to 1 bit mode */ + sd->sd_mode = SDIOH_MODE_SD1; + sdstd_set_dma_mode(sd, sd->sd_dma_mode); + } + sdstd_unlock(sd); + return TRUE; +} + +/* Disable device interrupt */ +void +sdstd_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + sd->intmask &= ~CLIENT_INTR; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ 
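+ /* The readback above presumably flushes the posted register write so
+ * the signal-enable change has taken effect before we return.
+ */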
+ } +} + +/* Enable device interrupt */ +void +sdstd_devintr_on(sdioh_info_t *sd) +{ + ASSERT(sd->lockcount == 0); + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + if (sd->use_client_ints) { + uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0)); + sdstd_wreg16(sd, SD_IntrStatusEnable, status); + + sd->intmask |= CLIENT_INTR; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ + } +} + +#ifdef BCMSDYIELD +/* Enable/disable other interrupts */ +void +sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + if (err) { + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err); + } + + sd->intmask |= norm; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + if (sd_forcerb) + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ +} + +void +sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + if (err) { + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0); + } + + sd->intmask &= ~norm; + sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask); + if (sd_forcerb) + sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */ +} +#endif /* BCMSDYIELD */ + +static int +sdstd_host_init(sdioh_info_t *sd) +{ + int num_slots, full_slot; + uint8 reg8; + + uint32 card_ins; + int slot, first_bar = 0; + bool detect_slots = FALSE; + uint bar; + + /* Check for Arasan ID */ + if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) { + sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_ARASAN_HDK; + detect_slots = TRUE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) { + sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_BCM27XX; + detect_slots = FALSE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) { + sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_TI_PCIXX21; + detect_slots = TRUE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) { + sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n", + __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_RICOH_R5C822; + detect_slots = TRUE; + } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) { + sd_info(("%s: JMicron Standard SDIO Host Controller\n", + __FUNCTION__)); + sd->controller_type = SDIOH_TYPE_JMICRON; + detect_slots = TRUE; + } else { + return ERROR; + } + + /* + * Determine num of slots + * Search each slot + */ + + first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7; + num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4; + num_slots &= 7; + num_slots++; /* map bits to num slots according to spec */ + + if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) == + ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) { + sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__)); + /* Set BAR0 Window to SDIOSTH core */ + OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000); + + /* Set defaults particular to this controller. */ + detect_slots = TRUE; + num_slots = 1; + first_bar = 0; + + /* Controller supports ADMA2, so turn it on here. 
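+ * (This overrides the SDMA default that sd_dma_mode was initialized with.)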
*/ + sd->sd_dma_mode = DMA_MODE_ADMA2; + } + + /* Map in each slot on the board and query it to see if a + * card is inserted. Use the first populated slot found. + */ + if (sd->mem_space) { + sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + + full_slot = -1; + + for (slot = 0; slot < num_slots; slot++) { + bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, + (uintptr)bar, SDIOH_REG_WINSZ); + + sd->adapter_slot = -1; + + if (detect_slots) { + card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT); + } else { + card_ins = TRUE; + } + + if (card_ins) { + sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot)); + if (full_slot < 0) + full_slot = slot; + } else { + sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot)); + } + + if (sd->mem_space) { + sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); + sd->mem_space = NULL; + } + } + + if (full_slot < 0) { + sd_err(("No slots on SDIO controller are populated\n")); + return -1; + } + + bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4); + sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ); + + sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n", + full_slot, + (full_slot + first_bar), + OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4), + sd->mem_space)); + + + sd->adapter_slot = full_slot; + + sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF; + switch (sd->version) { + case 0: + sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n", + sdstd_rreg16(sd, SD_HostControllerVersion) >> 8)); + break; + case 1: + case 2: + sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n", + sdstd_rreg16(sd, SD_HostControllerVersion) >> 8)); + break; + default: + sd_err(("%s: Host Controller version 0x%02x not supported.\n", + __FUNCTION__, sd->version)); + break; + } + + sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */ + sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap); + + sdstd_set_dma_mode(sd, sd->sd_dma_mode); + + + sdstd_reset(sd, 1, 0); + + /* Read SD4/SD1 mode */ + if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) { + if (reg8 & SD4_MODE) { + sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n", + __FUNCTION__, reg8)); + } + } + + /* Default power on mode is SD1 */ + sd->sd_mode = SDIOH_MODE_SD1; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = full_slot; + + return (SUCCESS); +} +#define CMD5_RETRIES 200 +static int +get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp) +{ + int retries, status; + + /* Get the Card's Operation Condition. 
Occasionally the board + * takes a while to become ready + */ + retries = CMD5_RETRIES; + do { + *cmd_rsp = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD5 failed\n", __FUNCTION__)); + return status; + } + sdstd_cmd_getrsp(sd, cmd_rsp, 1); + if (!GFIELD(*cmd_rsp, RSP4_CARD_READY)) + sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__)); + } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries); + if (!retries) + return ERROR; + + return (SUCCESS); +} + +static int +sdstd_client_init(sdioh_info_t *sd) +{ + uint32 cmd_arg, cmd_rsp; + int status; + uint8 fn_ints; + + + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + + /* Clear any pending ints */ + sdstd_wreg16(sd, SD_IntrStatus, 0x1ff); + sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff); + + /* Enable both Normal and Error Status. This does not enable + * interrupts, it only enables the status bits to + * become 'live' + */ + sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff); + + sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */ + + /* Start at ~400KHz clock rate for initialization */ + if (!sdstd_start_clock(sd, 128)) { + sd_err(("sdstd_start_clock failed\n")); + return ERROR; + } + if (!sdstd_start_power(sd)) { + sd_err(("sdstd_start_power failed\n")); + return ERROR; + } + + if (sd->num_funcs == 0) { + sd_err(("%s: No IO funcs!\n", __FUNCTION__)); + return ERROR; + } + + /* In SPI mode, issue CMD0 first */ + if (sd->sd_mode == SDIOH_MODE_SPI) { + cmd_arg = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg)) + != SUCCESS) { + sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n")); + return status; + } + } + + if (sd->sd_mode != SDIOH_MODE_SPI) { + uint16 rsp6_status; + + /* Card is operational. Ask it to send an RCA */ + cmd_arg = 0; + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD3 failed!\n", __FUNCTION__)); + return status; + } + + /* Verify the card status returned with the cmd response */ + sdstd_cmd_getrsp(sd, &cmd_rsp, 1); + rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS); + if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) || + GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) || + GFIELD(rsp6_status, RSP6STAT_ERROR)) { + sd_err(("%s: CMD3 response error. Response = 0x%x!\n", + __FUNCTION__, rsp6_status)); + return ERROR; + } + + /* Save the Card's RCA */ + sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA); + sd_info(("RCA is 0x%x\n", sd->card_rca)); + + if (rsp6_status) + sd_err(("raw status is 0x%x\n", rsp6_status)); + + /* Select the card */ + cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca); + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) + != SUCCESS) { + sd_err(("%s: CMD7 failed!\n", __FUNCTION__)); + return status; + } + sdstd_cmd_getrsp(sd, &cmd_rsp, 1); + if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) { + sd_err(("%s: CMD7 response error. Response = 0x%x!\n", + __FUNCTION__, cmd_rsp)); + return ERROR; + } + } + + sdstd_card_enablefuncs(sd); + + if (!sdstd_bus_width(sd, sd_sdmode)) { + sd_err(("sdstd_bus_width failed\n")); + return ERROR; + } + + set_client_block_size(sd, 1, BLOCK_SIZE_4318); + fn_ints = INTR_CTL_FUNC1_EN; + + if (sd->num_funcs >= 2) { + set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */); + fn_ints |= INTR_CTL_FUNC2_EN; + } + + /* Enable/Disable Client interrupts */ + /* Turn on here but disable at host controller? 
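+ * Note: this enables interrupts in the card's CCCR only; the host-side
+ * signal enable was cleared above and is raised later (see
+ * sdstd_devintr_on()) once the client actually wants interrupts.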
*/ + if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1, + (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) { + sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__)); + return ERROR; + } + + /* Switch to High-speed clocking mode if both host and device support it */ + sdstd_set_highspeed_mode(sd, (bool)sd_hiok); + + /* After configuring for High-Speed mode, set the desired clock rate. */ + if (!sdstd_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("sdstd_start_clock failed\n")); + return ERROR; + } + + sd->card_init_done = TRUE; + + return SUCCESS; +} + +static int +sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode) +{ + uint32 regdata; + int status; + uint8 reg8; + + reg8 = sdstd_rreg8(sd, SD_HostCntrl); + + + if (HSMode == TRUE) { + if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) { + sd_err(("Host Controller does not support hi-speed mode.\n")); + return BCME_ERROR; + } + + sd_info(("Attempting to enable High-Speed mode.\n")); + + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != SUCCESS) { + return BCME_SDIO_ERROR; + } + if (regdata & SDIO_SPEED_SHS) { + sd_info(("Device supports High-Speed mode.\n")); + + regdata |= SDIO_SPEED_EHS; + + sd_info(("Writing %08x to Card at %08x\n", + regdata, SDIOD_CCCR_SPEED_CONTROL)); + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return BCME_SDIO_ERROR; + } + + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != BCME_OK) { + return BCME_SDIO_ERROR; + } + + sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL)); + + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1); + + sd_err(("High-speed clocking mode enabled.\n")); + } + else { + sd_err(("Device does not support High-Speed Mode.\n")); + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0); + } + } else { + /* Force off device bit */ + if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, ®data)) != BCME_OK) { + return status; + } + if (regdata & SDIO_SPEED_EHS) { + regdata &= ~SDIO_SPEED_EHS; + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL, + 1, regdata)) != BCME_OK) { + return status; + } + } + + sd_err(("High-speed clocking mode disabled.\n")); + reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0); + } + + sdstd_wreg8(sd, SD_HostCntrl, reg8); + + return BCME_OK; +} + +/* Select DMA Mode: + * If dma_mode == DMA_MODE_AUTO, pick the "best" mode. + * Otherwise, pick the selected mode if supported. + * If not supported, use PIO mode. 
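+ * In AUTO mode the preference order, taken from the Capabilities register,
+ * is ADMA2, then ADMA1, then SDMA, and finally PIO.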
+ */ +static int +sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode) +{ + uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE; + int8 prev_dma_mode = sd->sd_dma_mode; + + switch (prev_dma_mode) { + case DMA_MODE_AUTO: + sd_dma(("%s: Selecting best DMA mode supported by controller.\n", + __FUNCTION__)); + if (GFIELD(sd->caps, CAP_ADMA2)) { + sd->sd_dma_mode = DMA_MODE_ADMA2; + dma_sel_bits = SDIOH_ADMA2_MODE; + } else if (GFIELD(sd->caps, CAP_ADMA1)) { + sd->sd_dma_mode = DMA_MODE_ADMA1; + dma_sel_bits = SDIOH_ADMA1_MODE; + } else if (GFIELD(sd->caps, CAP_DMA)) { + sd->sd_dma_mode = DMA_MODE_SDMA; + } else { + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_NONE: + sd->sd_dma_mode = DMA_MODE_NONE; + break; + case DMA_MODE_SDMA: + if (GFIELD(sd->caps, CAP_DMA)) { + sd->sd_dma_mode = DMA_MODE_SDMA; + } else { + sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA1: + if (GFIELD(sd->caps, CAP_ADMA1)) { + sd->sd_dma_mode = DMA_MODE_ADMA1; + dma_sel_bits = SDIOH_ADMA1_MODE; + } else { + sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA2: + if (GFIELD(sd->caps, CAP_ADMA2)) { + sd->sd_dma_mode = DMA_MODE_ADMA2; + dma_sel_bits = SDIOH_ADMA2_MODE; + } else { + sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + } + break; + case DMA_MODE_ADMA2_64: + sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__)); + sd->sd_dma_mode = DMA_MODE_NONE; + break; + default: + sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__, + prev_dma_mode)); + sd->sd_dma_mode = DMA_MODE_NONE; + break; + } + + /* clear SysAddr, only used for SDMA */ + sdstd_wreg(sd, SD_SysAddr, 0); + + sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode])); + + reg8 = sdstd_rreg8(sd, SD_HostCntrl); + reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits); + sdstd_wreg8(sd, SD_HostCntrl, reg8); + sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8)); + + return BCME_OK; +} + + +bool +sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor) +{ + uint rc, count; + uint16 divisor; + + /* turn off HC clock */ + sdstd_wreg16(sd, SD_ClockCntrl, + sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */ + + /* Set divisor */ + + divisor = (new_sd_divisor >> 1) << 8; + + sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl))); + sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor); + sd_info(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__, + new_sd_divisor, divisor)); + + sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_TO_CLKFREQ))); + + if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)), + ((50 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)), + ((48 % new_sd_divisor) ? "KHz" : "MHz"))); + } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) { + sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__, + ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)), + ((33 % new_sd_divisor) ? 
"KHz" : "MHz"))); + + } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) { + } else { + sd_err(("Need to determine divisor for %d MHz clocks\n", + GFIELD(sd->caps, CAP_TO_CLKFREQ))); + sd_err(("Consult SD Host Controller Spec: Clock Control Register\n")); + return (FALSE); + } + + sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */ + + /* Wait for clock to stabilize */ + rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2); + count = 0; + while (!rc) { + OSL_DELAY(1); + sd_info(("Waiting for clock to become stable 0x%x\n", rc)); + rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2); + count++; + if (count > 10000) { + sd_err(("%s:Clocks failed to stabilize after %u attempts", + __FUNCTION__, count)); + return (FALSE); + } + } + /* Turn on clock */ + sdstd_or_reg16(sd, SD_ClockCntrl, 0x4); + + /* Set timeout control (adjust default value based on divisor). + * Disabling timeout interrupts during setting is advised by host spec. + */ + { + uint16 regdata; + uint toval; + + toval = sd_toctl; + divisor = new_sd_divisor; + + while (toval && !(divisor & 1)) { + toval -= 1; + divisor >>= 1; + } + + regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT)); + sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval); + sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata); + } + + OSL_DELAY(2); + + sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl))); + + return TRUE; +} + +bool +sdstd_start_power(sdioh_info_t *sd) +{ + char *s; + uint32 cmd_arg; + uint32 cmd_rsp; + uint8 pwr = 0; + int volts; + + volts = 0; + s = NULL; + if (GFIELD(sd->caps, CAP_VOLT_1_8)) { + volts = 5; + s = "1.8"; + } + if (GFIELD(sd->caps, CAP_VOLT_3_0)) { + volts = 6; + s = "3.0"; + } + if (GFIELD(sd->caps, CAP_VOLT_3_3)) { + volts = 7; + s = "3.3"; + } + + pwr = SFIELD(pwr, PWR_VOLTS, volts); + pwr = SFIELD(pwr, PWR_BUS_EN, 1); + sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */ + sd_info(("Setting Bus Power to %s Volts\n", s)); + + /* Wait for power to stabilize, Dongle takes longer than NIC. */ + OSL_DELAY(250000); + + /* Get the Card's Operation Condition. Occasionally the board + * takes a while to become ready + */ + cmd_arg = 0; + cmd_rsp = 0; + if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) { + sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__)); + sdstd_reset(sd, 0, 1); + return FALSE; + } + + sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT))); + sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS))); + sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY))); + sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR))); + + /* Verify that the card supports I/O mode */ + if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) { + sd_err(("%s: Card does not support I/O\n", __FUNCTION__)); + return ERROR; + } + sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS); + + /* Examine voltage: Arasan only supports 3.3 volts, + * so look for 3.2-3.3 Volts and also 3.3-3.4 volts. 
+ */ + + if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) { + sd_err(("This client does not support 3.3 volts!\n")); + return ERROR; + } + sd_info(("Leaving bus power at 3.3 Volts\n")); + + cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000); + cmd_rsp = 0; + get_ocr(sd, &cmd_arg, &cmd_rsp); + sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR))); + return TRUE; +} + +bool +sdstd_bus_width(sdioh_info_t *sd, int new_mode) +{ + uint32 regdata; + int status; + uint8 reg8; + + sd_trace(("%s\n", __FUNCTION__)); + if (sd->sd_mode == new_mode) { + sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode)); + /* Could exit, but continue just in case... */ + } + + /* Set client side via reg 0x7 in CCCR */ + if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, ®data)) != SUCCESS) + return (bool)status; + regdata &= ~BUS_SD_DATA_WIDTH_MASK; + if (new_mode == SDIOH_MODE_SD4) { + sd_info(("Changing to SD4 Mode\n")); + regdata |= SD4_MODE; + } else if (new_mode == SDIOH_MODE_SD1) { + sd_info(("Changing to SD1 Mode\n")); + } else { + sd_err(("SPI Mode not supported by Standard Host Controller\n")); + } + + if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS) + return (bool)status; + + /* Set host side via Host reg */ + reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE; + if (new_mode == SDIOH_MODE_SD4) + reg8 |= SD4_MODE; + sdstd_wreg8(sd, SD_HostCntrl, reg8); + + sd->sd_mode = new_mode; + + return TRUE; +} + +static int +sdstd_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((sdstd_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (sdstd_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +static int +sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdstd_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +static int +sdstd_card_enablefuncs(sdioh_info_t *sd) +{ + int status; + uint32 regdata; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + /* Enable function 1 on the card */ + regdata = SDIO_FUNC_ENABLE_1; + if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS) + return status; + + return SUCCESS; +} + +/* Read client card reg */ +static int +sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ); + cmd_arg = 
SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0); + + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) { + sd_err(("%s: 1: ErrorintrStatus 0x%x\n", + __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus))); + } + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + *data = GFIELD(rsp5, RSP5_DATA); + } else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ); + + sd->data_xfer_count = regsize; + + /* sdstd_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags is 0x%x\t %d\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + + if (sd->polled_mode) { + volatile uint16 int_reg; + int retries = RETRIES_LARGE; + + /* Wait for Read Buffer to become ready */ + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0)); + + if (!retries) { + sd_err(("%s: Timeout on Buf_Read_Ready: " + "intStat: 0x%x errint: 0x%x PresentState 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + return (ERROR); + } + + /* Have Buffer Ready, so clear it and read the data */ + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1)); + if (regsize == 2) + *data = sdstd_rreg16(sd, SD_BufferDataPort0); + else + *data = sdstd_rreg(sd, SD_BufferDataPort0); + + /* Check Status. + * After the data is read, the Transfer Complete bit should be on + */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0)); + + /* Check for any errors from the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) + return ERROR; + + if (!retries) { + sd_err(("%s: Timeout on xfer complete: " + "intr 0x%04x err 0x%04x state 0x%08x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + return (ERROR); + } + + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1)); + } + } + if (sd->polled_mode) { + if (regsize == 2) + *data &= 0xffff; + } + return SUCCESS; +} + +bool +check_client_intr(sdioh_info_t *sd) +{ + uint16 raw_int, cur_int, old_int; + + raw_int = sdstd_rreg16(sd, SD_IntrStatus); + cur_int = raw_int & sd->intmask; + + if (!cur_int) { + /* Not an error -- might share interrupts... 
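+ * Returning FALSE lets the caller report the interrupt as not ours,
+ * which matters when the IRQ line is shared with other devices.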
*/ + return FALSE; + } + + if (GFIELD(cur_int, INTSTAT_CARD_INT)) { + old_int = sdstd_rreg16(sd, SD_IntrStatusEnable); + sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0)); + + if (sd->client_intr_enabled && sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + sdstd_wreg16(sd, SD_IntrStatusEnable, old_int); + } else { + /* Local interrupt: disable, set flag, and save intrstatus */ + sdstd_wreg16(sd, SD_IntrSignalEnable, 0); + sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0); + sd->local_intrcount++; + sd->got_hcint = TRUE; + sd->last_intrstatus = cur_int; + } + + return TRUE; +} + +void +sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err) +{ + uint16 int_reg, err_reg; + int retries = RETRIES_LARGE; + + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus); + } while (--retries && !(int_reg & norm) && !(err_reg & err)); + + norm |= sd->intmask; + if (err_reg & err) + norm = SFIELD(norm, INTSTAT_ERROR_INT, 1); + sd->last_intrstatus = int_reg & norm; +} + +/* write a client register */ +static int +sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, rsp5, flags; + + cmd_arg = 0; + + if ((func == 0) || (regsize == 1)) { + cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); + cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff); + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + flags = GFIELD(rsp5, RSP5_FLAGS); + if (flags && (flags != 0x10)) + sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n", + __FUNCTION__, flags)); + } + else { + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize); + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + + sd->data_xfer_count = regsize; + + /* sdstd_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg)) + != SUCCESS) + return status; + + sdstd_cmd_getrsp(sd, &rsp5, 1); + + if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) + sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n", + __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS))); + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + + if (sd->polled_mode) { + uint16 int_reg; + int retries = RETRIES_LARGE; + + /* Wait for Write Buffer to become ready */ + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0)); + + if (!retries) { + sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x " + "errint: 0x%x PresentState 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + return (ERROR); + } + /* Clear Write Buf Ready bit */ + int_reg = 0; + int_reg = 
SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1); + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* At this point we have Buffer Ready, so write the data */ + if (regsize == 2) + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data); + else + sdstd_wreg(sd, SD_BufferDataPort0, data); + + /* Wait for Transfer Complete */ + retries = RETRIES_LARGE; + do { + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0)); + + /* Check for any errors from the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) + return ERROR; + + if (retries == 0) { + sd_err(("%s: Timeout for xfer complete; State = 0x%x, " + "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n", + __FUNCTION__, sdstd_rreg(sd, SD_PresentState), + int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus), + sd->r_cnt, sd->t_cnt)); + } + /* Clear the status bits */ + sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0)); + } + } + return SUCCESS; +} + +void +sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */) +{ + int rsp_count; + int respaddr = SD_Response0; + + if (count > 4) + count = 4; + + for (rsp_count = 0; rsp_count < count; rsp_count++) { + *rsp_buffer++ = sdstd_rreg(sd, respaddr); + respaddr += 4; + } +} + +static int +sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg) +{ + uint16 cmd_reg; + int retries; + uint32 cmd_arg; + uint16 xfer_reg = 0; + + + if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) && + ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) { + sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd)); + return ERROR; + } + + retries = RETRIES_SMALL; + while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) { + if (retries == RETRIES_SMALL) + sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n", + __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState))); + } + if (!retries) { + sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + + + cmd_reg = 0; + switch (cmd) { + case SDIOH_CMD_0: /* Set Card to Idle State - No Response */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_5: /* Send Operation condition - Response R4 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_7: /* Select card - Response R1 */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, 
CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_15: /* Set card to inactive state - Response None */ + sd_data(("%s: CMD%d\n", __FUNCTION__, cmd)); + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */ + + sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n", + __FUNCTION__, + GFIELD(arg, CMD52_FUNCTION), + GFIELD(arg, CMD52_REG_ADDR), + GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R", + GFIELD(arg, CMD52_DATA))); + + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + break; + + case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */ + + sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n", + __FUNCTION__, + GFIELD(arg, CMD53_FUNCTION), + GFIELD(arg, CMD53_REG_ADDR), + GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R", + GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte", + GFIELD(arg, CMD53_BYTE_BLK_CNT), + GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr")); + + cmd_arg = arg; + xfer_reg = 0; + + cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48); + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1); + cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd); + + use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE); + + if (GFIELD(cmd_arg, CMD53_BLK_MODE)) { + uint16 blocksize; + uint16 blockcount; + int func; + + ASSERT(sdioh_info->sd_blockmode); + + func = GFIELD(cmd_arg, CMD53_FUNCTION); + blocksize = MIN((int)sdioh_info->data_xfer_count, + sdioh_info->client_block_size[func]); + blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT); + + /* data_xfer_cnt is already setup so that for multiblock mode, + * it is the entire buffer length. For non-block or single block, + * it is < 64 bytes + */ + if (use_dma) { + switch (sdioh_info->sd_dma_mode) { + case DMA_MODE_SDMA: + sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n", + __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr), + (uint32)sdioh_info->dma_phys)); + sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys); + break; + case DMA_MODE_ADMA1: + case DMA_MODE_ADMA2: + sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__)); + sd_create_adma_descriptor(sdioh_info, 0, + sdioh_info->dma_phys, blockcount*blocksize, + ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END | + ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN); + /* Dump descriptor if DMA debugging is enabled. 
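+				 * The single descriptor built above is marked VALID|END|TRAN
+				 * (plus INT) and covers the whole blockcount*blocksize buffer,
+				 * so no LINK chaining is needed for this transfer.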
*/ + if (sd_msglevel & SDH_DMA_VAL) { + sd_dump_adma_dscr(sdioh_info); + } + + sdstd_wreg(sdioh_info, SD_ADMA_SysAddr, + sdioh_info->adma2_dscr_phys); + break; + default: + sd_err(("%s: unsupported DMA mode %d.\n", + __FUNCTION__, sdioh_info->sd_dma_mode)); + break; + } + } + + sd_trace(("%s: Setting block count %d, block size %d bytes\n", + __FUNCTION__, blockcount, blocksize)); + sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize); + sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount); + + xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma); + + if (sdioh_info->client_block_size[func] != blocksize) + set_client_block_size(sdioh_info, 1, blocksize); + + if (blockcount > 1) { + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1); + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + } else { + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0); + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + } + + if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ) + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1); + else + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0); + + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), + PRES_DAT_INHIBIT) && --retries) + sd_err(("%s: Waiting for Data Inhibit cmd = %d\n", + __FUNCTION__, cmd)); + if (!retries) { + sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg); + + } else { /* Non block mode */ + uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT); + /* The byte/block count field only has 9 bits, + * so, to do a 512-byte bytemode transfer, this + * field will contain 0, but we need to tell the + * controller we're transferring 512 bytes. + */ + if (bytes == 0) bytes = 512; + + if (use_dma) + sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys); + + /* PCI: Transfer Mode register 0x0c */ + xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma); + xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0); + if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ) + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1); + else + xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0); + /* See table 2-8 Host Controller spec ver 1.00 */ + xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */ + xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0); + + sdstd_wreg16(sdioh_info, SD_BlockSize, bytes); + + sdstd_wreg16(sdioh_info, SD_BlockCount, 1); + + retries = RETRIES_SMALL; + while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), + PRES_DAT_INHIBIT) && --retries) + sd_err(("%s: Waiting for Data Inhibit cmd = %d\n", + __FUNCTION__, cmd)); + if (!retries) { + sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg); + } + break; + + default: + sd_err(("%s: Unknown command\n", __FUNCTION__)); + return ERROR; + } + + if (sdioh_info->sd_mode == SDIOH_MODE_SPI) { + cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); + cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); + } + + /* Setup and issue the SDIO command */ + sdstd_wreg(sdioh_info, SD_Arg0, arg); + sdstd_wreg16(sdioh_info, SD_Command, cmd_reg); + + /* If we are in polled mode, wait for the command to complete. + * In interrupt mode, return immediately. 
The calling function will + * know that the command has completed when the CMDATDONE interrupt + * is asserted + */ + if (sdioh_info->polled_mode) { + uint16 int_reg = 0; + int retries = RETRIES_LARGE; + + do { + int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus); + } while (--retries && + (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) && + (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0)); + + if (!retries) { + sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x " + "error stat 0x%x state 0x%x\n", + __FUNCTION__, int_reg, + sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus), + sdstd_rreg(sdioh_info, SD_PresentState))); + + /* Attempt to reset CMD line when we get a CMD timeout */ + sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); + retries = RETRIES_LARGE; + do { + sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__)); + } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), + SW_RESET_CMD)) && retries--); + + if (!retries) { + sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); + } + + if (trap_errs) + ASSERT(0); + return (ERROR); + } + + /* Clear Command Complete interrupt */ + int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1); + sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg); + + /* Check for Errors */ + if (sdstd_check_errs(sdioh_info, cmd, arg)) { + if (trap_errs) + ASSERT(0); + return ERROR; + } + } + return SUCCESS; +} + + +static int +sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + uint32 rsp5; + uint16 int_reg, int_bit; + uint flags; + int num_blocks, blocksize; + bool local_blockmode, local_dma; + bool read = rw == SDIOH_READ ? 1 : 0; + bool yield = FALSE; + + ASSERT(nbytes); + + cmd_arg = 0; + + sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt)); + + if (read) sd->r_cnt++; else sd->t_cnt++; + + local_blockmode = sd->sd_blockmode; + local_dma = USE_DMA(sd); + + /* Don't bother with block mode on small xfers */ + if (nbytes < sd->client_block_size[func]) { + sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n", + nbytes, sd->client_block_size[func])); + local_blockmode = FALSE; + local_dma = FALSE; + } + + if (local_blockmode) { + blocksize = MIN(sd->client_block_size[func], nbytes); + num_blocks = nbytes/blocksize; + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1); + } else { + num_blocks = 1; + blocksize = nbytes; + cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes); + cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0); + } + + if (local_dma && !read) { + bcopy(data, sd->dma_buf, nbytes); + sd_sync_dma(sd, read, nbytes); + } + + if (fifo) + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0); + else + cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); + + cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr); + if (read) + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ); + else + cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE); + + sd->data_xfer_count = nbytes; + + /* sdstd_cmd_issue() returns with the command complete bit + * in the ISR already cleared + */ + if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? 
"read" : "write"))); + return status; + } + + sdstd_cmd_getrsp(sd, &rsp5, 1); + + if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) { + sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d " + "numblocks %d, blocksize %d\n", + __FUNCTION__, nbytes, local_dma, local_dma, read, num_blocks, blocksize)); + + if (flags & 1) + sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, " + "bytes %d dma %d\n", + __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT), + GFIELD(cmd_arg, CMD53_BLK_MODE))); + if (flags & 0x8) + sd_err(("%s: Rsp5: General Error\n", __FUNCTION__)); + + sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n", + __FUNCTION__, flags)); + if (trap_errs) + ASSERT(0); + return ERROR; + } + + if (GFIELD(rsp5, RSP5_STUFF)) + sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n", + __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); + +#ifdef BCMSDYIELD + yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield); +#endif + + if (!local_dma) { + int bytes, i; + uint32 tmp; + for (i = 0; i < num_blocks; i++) { + int words; + + /* Decide which status bit we're waiting for */ + if (read) + int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1); + else + int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1); + + /* If not on, wait for it (or for xfer error) */ + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + if (!(int_reg & int_bit)) + int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield); + + /* Confirm we got the bit w/o error */ + if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) { + sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x " + "errint: 0x%x PresentState 0x%x\n", + __FUNCTION__, read ? "Read" : "Write", int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), + sdstd_rreg(sd, SD_PresentState))); + sdstd_dumpregs(sd); + sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); + return (ERROR); + } + + /* Clear Buf Ready bit */ + sdstd_wreg16(sd, SD_IntrStatus, int_bit); + + /* At this point we have Buffer Ready, write the data 4 bytes at a time */ + for (words = blocksize/4; words; words--) { + if (read) + *data = sdstd_rreg(sd, SD_BufferDataPort0); + else + sdstd_wreg(sd, SD_BufferDataPort0, *data); + data++; + } + + bytes = blocksize % 4; + + /* If no leftover bytes, go to next block */ + if (!bytes) + continue; + + switch (bytes) { + case 1: + /* R/W 8 bits */ + if (read) + *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0)); + else + sdstd_wreg8(sd, SD_BufferDataPort0, + (uint8)(*(data++) & 0xff)); + break; + case 2: + /* R/W 16 bits */ + if (read) + *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); + else + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++))); + break; + case 3: + /* R/W 24 bits: + * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23] + */ + if (read) { + tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); + tmp |= ((uint32)(sdstd_rreg8(sd, + SD_BufferDataPort1)) << 16); + *(data++) = tmp; + } else { + tmp = *(data++); + sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff); + sdstd_wreg8(sd, SD_BufferDataPort1, + (uint8)((tmp >> 16) & 0xff)); + } + break; + default: + sd_err(("%s: Unexpected bytes leftover %d\n", + __FUNCTION__, bytes)); + ASSERT(0); + break; + } + } + } /* End PIO processing */ + + /* Wait for Transfer Complete or Transfer Error */ + int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1); + + /* If not on, wait for it (or for xfer error) */ + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + if (!(int_reg & int_bit)) + int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield); + + /* Check for any errors from 
the data phase */ + if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) + return ERROR; + + /* May have gotten a software timeout if not blocking? */ + int_reg = sdstd_rreg16(sd, SD_IntrStatus); + if (!(int_reg & int_bit)) { + sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, " + "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n", + __FUNCTION__, read ? "R" : "W", local_dma, + sdstd_rreg(sd, SD_PresentState), int_reg, + sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes, + sd->r_cnt, sd->t_cnt)); + sdstd_dumpregs(sd); + return ERROR; + } + + /* Clear the status bits */ + int_reg = int_bit; + if (local_dma) { + /* DMA Complete */ + /* Reads in particular don't have DMA_COMPLETE set */ + int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1); + } + sdstd_wreg16(sd, SD_IntrStatus, int_reg); + + /* Fetch data */ + if (local_dma && read) { + sd_sync_dma(sd, read, nbytes); + bcopy(sd->dma_buf, data, nbytes); + } + return SUCCESS; +} + +static int +set_client_block_size(sdioh_info_t *sd, int func, int block_size) +{ + int base; + int err = 0; + + + sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func)); + sd->client_block_size[func] = block_size; + + /* Set the block size in the SDIO Card register */ + base = func * SDIOD_FBR_SIZE; + err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff); + if (!err) { + err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1, + (block_size >> 8) & 0xff); + } + + /* Do not set the block size in the SDIO Host register, that + * is func dependent and will get done on an individual + * transaction basis + */ + + return (err ? BCME_SDIO_ERROR : 0); +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + uint8 hreg; + + /* Reset the attached device (use slower clock for safety) */ + sdstd_start_clock(si, 128); + sdstd_reset(si, 0, 1); + + /* Reset portions of the host state accordingly */ + hreg = sdstd_rreg8(si, SD_HostCntrl); + hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0); + hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0); + si->sd_mode = SDIOH_MODE_SD1; + + /* Reinitialize the card */ + si->card_init_done = FALSE; + return sdstd_client_init(si); +} + + +static void +sd_map_dma(sdioh_info_t * sd) +{ + + void *va; + + if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, + &sd->dma_start_phys, 0x12, 12)) == NULL) { + sd->sd_dma_mode = DMA_MODE_NONE; + sd->dma_start_buf = 0; + sd->dma_buf = (void *)0; + sd->dma_phys = 0; + sd->alloced_dma_size = SD_PAGE; + sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__)); + } else { + sd->dma_start_buf = va; + sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); + sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE); + sd->alloced_dma_size = SD_PAGE; + sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n", + __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys)); + sd_fill_dma_data_buf(sd, 0xA5); + } + + if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, + &sd->adma2_dscr_start_phys, 0x12, 12)) == NULL) { + sd->sd_dma_mode = DMA_MODE_NONE; + sd->adma2_dscr_start_buf = 0; + sd->adma2_dscr_buf = (void *)0; + sd->adma2_dscr_phys = 0; + sd->alloced_adma2_dscr_size = 0; + sd_err(("%s: DMA_ALLOC failed for descriptor buffer. 
" + "Disabling DMA support.\n", __FUNCTION__)); + } else { + sd->adma2_dscr_start_buf = va; + sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); + sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE); + sd->alloced_adma2_dscr_size = SD_PAGE; + } + + sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n", + __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf, + sd->adma2_dscr_phys)); + sd_clear_adma_dscr_buf(sd); +} + +static void +sd_unmap_dma(sdioh_info_t * sd) +{ + if (sd->dma_start_buf) { + DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size, + sd->dma_start_phys, 0x12); + } + + if (sd->adma2_dscr_start_buf) { + DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size, + sd->adma2_dscr_start_phys, 0x12); + } +} + +static void sd_clear_adma_dscr_buf(sdioh_info_t *sd) +{ + bzero((char *)sd->adma2_dscr_buf, SD_PAGE); + sd_dump_adma_dscr(sd); +} + +static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data) +{ + memset((char *)sd->dma_buf, data, SD_PAGE); +} + + +static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index, + uint32 addr_phys, uint16 length, uint16 flags) +{ + adma2_dscr_32b_t *adma2_dscr_table; + adma1_dscr_t *adma1_dscr_table; + + adma2_dscr_table = sd->adma2_dscr_buf; + adma1_dscr_table = sd->adma2_dscr_buf; + + switch (sd->sd_dma_mode) { + case DMA_MODE_ADMA2: + sd_dma(("%s: creating ADMA2 descriptor for index %d\n", + __FUNCTION__, index)); + + adma2_dscr_table[index].phys_addr = addr_phys; + adma2_dscr_table[index].len_attr = length << 16; + adma2_dscr_table[index].len_attr |= flags; + break; + case DMA_MODE_ADMA1: + /* ADMA1 requires two descriptors, one for len + * and the other for data transfer + */ + index <<= 1; + + sd_dma(("%s: creating ADMA1 descriptor for index %d\n", + __FUNCTION__, index)); + + adma1_dscr_table[index].phys_addr_attr = length << 12; + adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET | + ADMA2_ATTRIBUTE_VALID); + adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000; + adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f); + break; + default: + sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n", + __FUNCTION__, sd->sd_dma_mode)); + break; + } +} + + +static void sd_dump_adma_dscr(sdioh_info_t *sd) +{ + adma2_dscr_32b_t *adma2_dscr_table; + adma1_dscr_t *adma1_dscr_table; + uint32 i = 0; + uint16 flags; + char flags_str[32]; + + ASSERT(sd->adma2_dscr_buf != NULL); + + adma2_dscr_table = sd->adma2_dscr_buf; + adma1_dscr_table = sd->adma2_dscr_buf; + + switch (sd->sd_dma_mode) { + case DMA_MODE_ADMA2: + sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n", + SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys)); + sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)" + " |\n")); + while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) { + flags = adma2_dscr_table->len_attr & 0xFFFF; + sprintf(flags_str, "%s%s%s%s", + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ", + (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), + (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), + (flags & ADMA2_ATTRIBUTE_VALID ? 
"VALID" : "")); + sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n", + i, adma2_dscr_table, adma2_dscr_table->phys_addr, + adma2_dscr_table->len_attr >> 16, flags, flags_str)); + i++; + + /* Follow LINK descriptors or skip to next. */ + if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) { + adma2_dscr_table = phys_to_virt( + adma2_dscr_table->phys_addr); + } else { + adma2_dscr_table++; + } + + } + break; + case DMA_MODE_ADMA1: + sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n", + SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys)); + sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n")); + + for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) { + flags = adma1_dscr_table->phys_addr_attr & 0x3F; + sprintf(flags_str, "%s%s%s%s", + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : + ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ", + (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), + (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), + (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : "")); + sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n", + i, adma1_dscr_table, + adma1_dscr_table->phys_addr_attr & 0xFFFFF000, + flags, flags_str)); + + /* Follow LINK descriptors or skip to next. */ + if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == + ADMA2_ATTRIBUTE_ACT_LINK) { + adma1_dscr_table = phys_to_virt( + adma1_dscr_table->phys_addr_attr & 0xFFFFF000); + } else { + adma1_dscr_table++; + } + } + break; + default: + sd_err(("Unknown DMA Descriptor Table Format.\n")); + break; + } +} + +static void sdstd_dumpregs(sdioh_info_t *sd) +{ + sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n", + sdstd_rreg16(sd, SD_IntrStatus), + sdstd_rreg16(sd, SD_ErrorIntrStatus))); + sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n", + sdstd_rreg16(sd, SD_IntrStatusEnable), + sdstd_rreg16(sd, SD_ErrorIntrStatusEnable))); + sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n", + sdstd_rreg16(sd, SD_IntrSignalEnable), + sdstd_rreg16(sd, SD_ErrorIntrSignalEnable))); +} diff --git a/drivers/net/wireless/bcm4329/bcmsdstd_linux.c b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c new file mode 100644 index 000000000000..a8b98e2054a0 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c @@ -0,0 +1,251 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver - linux portion + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdstd_linux.c,v 1.11.18.2.16.1 2010/08/17 17:03:13 Exp $ + */ + +#include <typedefs.h> +#include <pcicfg.h> +#include <bcmutils.h> +#include <sdio.h> /* SDIO Specs */ +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* to get msglevel bit values */ + +#include <linux/sched.h> /* request_irq() */ + +#include <bcmsdstd.h> + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* Interrupt handler */ +static irqreturn_t +sdstd_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = check_client_intr(sd); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sd_trace(("INTR->WAKE\n")); + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + return IRQ_RETVAL(ours); + } +} + +/* Register with Linux for interrupts */ +int +sdstd_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ +void +sdstd_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ + +uint32 * +sdstd_reg_map(osl_t *osh, int32 addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +sdstd_reg_unmap(osl_t *osh, int32 addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +sdstd_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +void +sdstd_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + sdstd_devintr_on(sd); + else + sdstd_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +sdstd_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked! called from %p\n", + __FUNCTION__, + __builtin_return_address(0))); + ASSERT(sd->lockcount == 0); + } + sdstd_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +sdstd_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + sdstd_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +uint16 +sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n", + __FUNCTION__, norm, err, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + sd->last_intrstatus = 0; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + /* Enable interrupts, wait for the indication, then disable */ + sdstd_intrs_on(sd, norm, err); + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + sdstd_intrs_off(sd, norm, err); + } else +#endif /* BCMSDYIELD */ + { + sdstd_spinbits(sd, norm, err); + } + + sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus)); + + return sd->last_intrstatus; +} diff --git a/drivers/net/wireless/bcm4329/bcmspibrcm.c b/drivers/net/wireless/bcm4329/bcmspibrcm.c new file mode 100644 index 000000000000..0f131a40f4b8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmspibrcm.c @@ -0,0 +1,1726 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 2010, Broadcom Corporation + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. 
+ * + * $Id: bcmspibrcm.c,v 1.11.2.10.2.9.6.11 2009/05/21 13:21:57 Exp $ + */ + +#define HSMODE + +#include <typedefs.h> + +#include <bcmdevs.h> +#include <bcmendian.h> +#include <bcmutils.h> +#include <osl.h> +#include <hndsoc.h> +#include <siutils.h> +#include <sbchipc.h> +#include <sbsdio.h> +#include <spid.h> + +#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ +#include <sdiovar.h> /* ioctl/iovars */ +#include <sdio.h> + +#include <pcicfg.h> + + +#include <bcmspibrcm.h> +#include <bcmspi.h> + +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + +#define CMDLEN 4 + +#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) + +/* Globals */ +uint sd_msglevel = 0; + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. + */ +#define BUF2_PKT_LEN 128 +uint8 spi_outbuf2[BUF2_PKT_LEN]; +uint8 spi_inbuf2[BUF2_PKT_LEN]; + +/* Prototypes */ +static bool bcmspi_test_card(sdioh_info_t *sd); +static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); +static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); +static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen); +static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, + uint8 *data); +static int bcmspi_driver_init(sdioh_info_t *sd); +static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, + uint32 *data); +static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); +static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (spi_osinit(sd) != 0) { + sd_err(("%s: spi_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->bar0 = bar0; + sd->irq = irq; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; + + /* Set defaults */ + sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit + * mode + */ + sd->wordlen = 2; + + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + 
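+		/* Unwind in reverse order of setup: free OS state, then the context */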
spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (bcmspi_driver_init(sd) != SUCCESS) { + sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_err(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif + +extern SDIOH_API_RC +sdioh_query_device(sdioh_info_t *sd) +{ + /* Return a BRCM ID appropriate to the dongle class */ + return (sd->num_funcs > 1) ? BCM4329_D11NDUAL_ID : BCM4318_D11G_ID; +} + +/* Provide dstatus bits of spi-transaction for dhd layers. 
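+ * sd->card_dstatus is refreshed after each gSPI transaction (the device is
+ * told via SPID_STATUS_ENABLE to append status bits), so the bus layer can
+ * poll it after an access -- illustrative only, handle_f2_read() is a
+ * hypothetical dhd-side helper:
+ *
+ *	if (sdioh_get_dstatus(sd) & STATUS_F2_PKT_AVAILABLE)
+ *		handle_f2_read(sd);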
*/ +extern uint32 +sdioh_get_dstatus(sdioh_info_t *sd) +{ + return sd->card_dstatus; +} + +extern void +sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) +{ + sd->chip = chip; + sd->chiprev = chiprev; +} + +extern void +sdioh_dwordmode(sdioh_info_t *sd, bool set) +{ + uint8 reg = 0; + int status; + + if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } + + if (set) { + reg |= DWORD_PKT_LEN_EN; + sd->dwordmode = TRUE; + sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ + } else { + reg &= ~DWORD_PKT_LEN_EN; + sd->dwordmode = FALSE; + sd->client_block_size[SPI_FUNC_2] = 2048; + } + + if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } +} + + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_SPIERRSTATS, + IOV_RESP_DELAY_ALL +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, + {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; +/* + sdioh_regs_t *regs; +*/ + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response 
delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE; + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_OTP; /* OTP offset in chipcommon. */ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 
0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, rw, func, + regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, + cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) + *byte = (uint8)data; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. + * Its main aim is to have simpler spi writes rather than recursive writes. + * e.g. When there is a need to program response delay on the fly after detecting the SPI-func + * this call will allow to program the response delay. + */ +static int +bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) +{ + uint32 cmd_arg; + uint32 datalen = 1; + uint32 hostlen; + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
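+	 * e.g. with a 16-bit spid (wordlen == 2) a one-byte write is padded to
+	 * two bytes; the host shifts out [4-byte swapped command][padded data]
+	 * and reads the device dstatus from the 4 bytes that follow the data
+	 * in the receive buffer.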
+ */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf2 = bcmswap32(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint16 *)spi_outbuf2 = bcmswap16(cmd_arg & 0xffff); + *(uint16 *)&spi_outbuf2[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16); + if (datalen & 0x1) + datalen++; + } else { + sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (datalen != 0) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = bcmswap32(byte); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint16 *)&spi_outbuf2[CMDLEN] = bcmswap16(byte & 0xffff); + *(uint16 *)&spi_outbuf2[CMDLEN + 2] = + bcmswap16((byte & 0xffff0000) >> 16); + } + } + + /* +4 for cmd, +4 for dstatus */ + hostlen = datalen + 8; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) | + (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16)); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +/* Program the response delay corresponding to the spi function */ +static int +bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) +{ + if (sd->resp_delay_all == FALSE) + return (BCME_OK); + + if (sd->prev_fun == func) + return (BCME_OK); + + if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) + return (BCME_OK); + + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); + + /* Remember function for which to avoid reprogramming resp-delay in next iteration */ + sd->prev_fun = func; + + return (BCME_OK); + +} + +#define GSPI_RESYNC_PATTERN 0x0 + +/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI. + * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is + * synchronised and all queued resuests are cancelled. + */ +static int +bcmspi_resync_f1(sdioh_info_t *sd) +{ + uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; + + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits. 
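+	 * For a 16-bit spid the two status halves arrive byte-swapped and are
+	 * recombined into host order below, exactly as in bcmspi_card_byterewrite().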
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) | + (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16)); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + if ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 0)) { + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } else { + err = ERROR_OF; + } + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? 
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); + } + + if (dstatus & STATUS_F3_PKT_AVAILABLE) { + spierrstats->f3pktavailable++; + sd_err(("Packet is available/ready in F3 TX FIFO\n")); + sd_err(("Packet length = %d\n", + (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ + return 0; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + + + +/* + * Private/Static work routines + */ +static int +bcmspi_host_init(sdioh_info_t *sd) +{ + + /* Default power on mode */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +static int +get_client_blocksize(sdioh_info_t *sd) +{ + uint32 regdata[2]; + int status; + + /* Find F1/F2/F3 max packet size */ + if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, + 8, regdata)) != SUCCESS) { + return status; + } + + sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", + regdata[0], regdata[1])); + + sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; + sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); + ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); + + sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; + sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); + ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); + + sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; + sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); + ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); + + return 0; +} + +static int +bcmspi_client_init(sdioh_info_t *sd) +{ + uint32 status_en_reg = 0; + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + +#ifdef HSMODE + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#else + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ + + if (!bcmspi_host_device_init_adapt(sd)) { + sd_err(("bcmspi_host_device_init_adapt failed\n")); + return ERROR; + } + + if (!bcmspi_test_card(sd)) { + sd_err(("bcmspi_test_card failed\n")); + return ERROR; + } + + sd->num_funcs = SPI_MAX_IOFUNCS; + + get_client_blocksize(sd); + + /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ + bcmspi_resync_f1(sd); + + sd->dwordmode = FALSE; + + bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); + + sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); + status_en_reg |= INTR_WITH_STATUS; + + + if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, + status_en_reg & 0xff) != SUCCESS) { + sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__)); + return ERROR; + } + + +#ifndef HSMODE + /* After configuring for High-Speed mode, set the desired clock rate. 
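+ * (spi_start_clock() takes a clock divisor here, as with sd_divisor above;
+ * 4 replaces the 128 used for the ~400KHz initialization clock.)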
*/ + if (!spi_start_clock(sd, 4)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ + + sd->card_init_done = TRUE; + + + return SUCCESS; +} + +static int +bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG, + 4, ®data)) != SUCCESS) + return status; + + sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata)); + + + if (hsmode == TRUE) { + sd_trace(("Attempting to enable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + sd_trace(("Device is already in High-Speed mode.\n")); + return status; + } else { + regdata |= HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) { + return status; + } + } + } else { + sd_trace(("Attempting to disable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + regdata &= ~HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) + return status; + } + else { + sd_trace(("Device is already in Low-Speed mode.\n")); + return status; + } + } + + spi_controller_highspeed_mode(sd, hsmode); + + return TRUE; +} + +#define bcmspi_find_curr_mode(sd) { \ + sd->wordlen = 2; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd->wordlen = 4; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd_trace(("Silicon testability issue: regdata = 0x%x." \ + " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ + OSL_DELAY(100000); \ +} + +#define INIT_ADAPT_LOOP 100 + +/* Adapt clock-phase-speed-bitwidth between host and device */ +static bool +bcmspi_host_device_init_adapt(sdioh_info_t *sd) +{ + uint32 wrregdata, regdata = 0; + int status; + int i; + + /* Due to a silicon testability issue, the first command from the Host + * to the device will get corrupted (first bit will be lost). So the + * Host should poll the device with a safe read request. ie: The Host + * should try to read F0 addr 0x14 using the Fixed address mode + * (This will prevent a unintended write command to be detected by device) + */ + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + /* If device was not power-cycled it will stay in 32bit mode with + * response-delay-all bit set. Alternate the iteration so that + * read either with or without response-delay for F0 to succeed. + */ + bcmspi_find_curr_mode(sd); + sd->resp_delay_all = (i & 0x1) ? 
TRUE : FALSE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = TRUE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = FALSE; + } + + /* Bail out, device not detected */ + if (i == INIT_ADAPT_LOOP) + return FALSE; + + /* Softreset the spid logic */ + if ((sd->dwordmode) || (sd->wordlen == 4)) { + bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); + bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, ®data); + sd_trace(("reset reg read = 0x%x\n", regdata)); + sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, + sd->wordlen, sd->resp_delay_all)); + /* Restore default state after softreset */ + sd->wordlen = 2; + sd->dwordmode = FALSE; + } + + if (sd->wordlen == 4) { + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != + SUCCESS) + return FALSE; + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n", + regdata)); + sd_trace(("Spid power was left on.\n")); + } else { + sd_err(("Spid power was left on but signature read failed." + " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + sd->resp_delay_all = TRUE; + if (sd->resp_delay_all == TRUE) { + /* Enable response delay for all */ + wrregdata |= (RESP_DELAY_ALL << 16); + /* Program response delay value */ + wrregdata &= 0xffff00ff; + wrregdata |= (F1_RESPONSE_DELAY << 8); + sd->prev_fun = SPI_FUNC_1; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + } + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + ®data)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + + + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered. 
Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func, + regaddr, *data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) + != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) + != SUCCESS) + return status; + + sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func, + regaddr, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 1, func, + regaddr, data)); + + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) + != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_trace(("%s: func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, func, + regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, + cmd_arg, &data, 1)) != SUCCESS) { + return status; + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); + + if (DWORDMODE_ON) { + spilen = GFIELD(cmd_arg, SPI_LEN); + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || + (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) + dstatus_idx = spilen * 3; + + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + spilen = spilen << 2; + dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; + /* convert len to mod16 size */ + spilen = ROUNDUP(spilen, 16); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } + } + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
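+ * Unlike the byte-rewrite path, the 16-bit case below also rounds a short
+ * datalen up to a full 4-byte word before the transfer.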
+ */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = bcmswap32(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint16 *)spi_outbuf = bcmswap16(cmd_arg & 0xffff); + *(uint16 *)&spi_outbuf[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (DWORDMODE_ON) { + if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + /* ASSERT(*ptr == ~*(ptr + 1)); */ + templen = ROUNDUP(templen, 16); + *ptr = templen; + sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); + } + } + + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + bcmswap32(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint16 *)&spi_outbuf[i * 4 + CMDLEN] = + bcmswap16(data[i] & 0xffff); + *(uint16 *)&spi_outbuf[i * 4 + CMDLEN + 2] = + bcmswap16((data[i] & 0xffff0000) >> 16); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0; + break; + case 1: + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = bcmswap32(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = (bcmswap16(*(uint16 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay])) | + ((bcmswap16(*(uint16 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay + 2])) << 16); + } + } + + if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + buslen = len = ~(*(ptr + 1)); + buslen = ROUNDUP(buslen, 16); + /* populate actual len in hw-header */ + if (templen == buslen) + *ptr = len; + } + } + } + + /* Restore back the len field of the hw header */ + if (DWORDMODE_ON) { + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + ptr = (uint16 *)&data[0]; + *ptr = (uint16)(~*(ptr+1)); + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. 
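+ * dstatus_idx computed above locates the status word beyond the data, the
+ * command and any response-delay padding.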
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx]) | + (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx + 2]) << 16)); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 ready with cached one */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, + data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? 
"write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} diff --git a/drivers/net/wireless/bcm4329/bcmutils.c b/drivers/net/wireless/bcm4329/bcmutils.c new file mode 100644 index 000000000000..43c04ee92f38 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmutils.c @@ -0,0 +1,1838 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * $Id: bcmutils.c,v 1.210.4.5.2.4.6.19 2010/04/26 06:05:25 Exp $ + */ + +#include <typedefs.h> +#include <bcmdefs.h> +#include <stdarg.h> +#include <bcmutils.h> +#ifdef BCMDRIVER +#include <osl.h> +#include <siutils.h> +#else +#include <stdio.h> +#include <string.h> +/* This case for external supplicant use */ +#if defined(BCMEXTSUP) +#include <bcm_osl.h> +#endif + +#endif /* BCMDRIVER */ +#include <bcmendian.h> +#include <bcmdevs.h> +#include <proto/ethernet.h> +#include <proto/vlan.h> +#include <proto/bcmip.h> +#include <proto/802.1d.h> +#include <proto/802.11.h> + + +#ifdef BCMDRIVER + + +/* copy a pkt buffer chain into a buffer */ +uint +pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + if (len < 0) + len = 4096; /* "infinite" */ + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(PKTDATA(osh, p) + offset, buf, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +/* copy a buffer into a pkt buffer chain */ +uint +pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + + + +/* return total length of buffer chain */ +uint +pkttotlen(osl_t *osh, void *p) +{ + uint total; + + total = 0; + for (; p; p = PKTNEXT(osh, p)) + total += PKTLEN(osh, p); + return (total); +} + +/* return the last buffer of chained pkt */ +void * +pktlast(osl_t *osh, void *p) +{ + for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) + ; + + return (p); +} + +/* count segments of a chained packet */ +uint +pktsegcnt(osl_t *osh, void *p) +{ + uint cnt; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) + cnt++; + + return cnt; +} + + +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * +pktq_penq(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + return p; +} + +void * +pktq_penq_head(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + return p; +} + +void * +pktq_pdeq(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) 
== NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} + +void * +pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir) +{ + struct pktq_prec *q; + void *p; + + q = &pq->q[prec]; + p = q->head; + while (p) { + q->head = PKTLINK(p); + PKTSETLINK(p, NULL); + PKTFREE(osh, p, dir); + q->len--; + pq->len--; + p = q->head; + } + ASSERT(q->len == 0); + q->tail = NULL; +} + +bool +pktq_pdel(struct pktq *pq, void *pktbuf, int prec) +{ + struct pktq_prec *q; + void *p; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + if (!pktbuf) + return FALSE; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + return FALSE; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->len--; + pq->len--; + PKTSETLINK(pktbuf, NULL); + return TRUE; +} + +void +pktq_init(struct pktq *pq, int num_prec, int max_len) +{ + int prec; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + pq->num_prec = (uint16)num_prec; + + pq->max = (uint16)max_len; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max = pq->max; +} + +void * +pktq_deq(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p; + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + + return p; +} + +void * +pktq_deq_tail(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p, *prev; + int prec; + + if (pq->len == 0) + return NULL; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + return (pq->q[prec].head); +} + +void * +pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + + if (pq->len == 0) + return NULL; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + return (pq->q[prec].tail); +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir) +{ + int prec; + for (prec = 0; prec < pq->num_prec; prec++) + pktq_pflush(osh, pq, prec, dir); + ASSERT(pq->len == 0); +} + +/* Return sum of lengths of a specific set of 
precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].len; + + return len; +} + +/* Priority dequeue from a specific set of precedences */ +void * +pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p; + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + return NULL; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + if (prec_out) + *prec_out = prec; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} +#endif /* BCMDRIVER */ + + + +const unsigned char bcm_ctype[] = { + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ + _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, + _BCM_C, /* 8-15 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ + _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ + _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +ulong +bcm_strtoul(char *cp, char **endp, uint base) +{ + ulong result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || 
(cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { + result = result*base + value; + /* Detected overflow */ + if (result < last_result && !minus) + return (ulong)-1; + last_result = result; + cp++; + } + + if (minus) + result = (ulong)(-(long)result); + + if (endp) + *endp = (char *)cp; + + return (result); +} + +int +bcm_atoi(char *s) +{ + return (int)bcm_strtoul(s, NULL, 10); +} + +/* return pointer to location of substring 'needle' in 'haystack' */ +char* +bcmstrstr(char *haystack, char *needle) +{ + int len, nlen; + int i; + + if ((haystack == NULL) || (needle == NULL)) + return (haystack); + + nlen = strlen(needle); + len = strlen(haystack) - nlen + 1; + + for (i = 0; i < len; i++) + if (memcmp(needle, &haystack[i], nlen) == 0) + return (&haystack[i]); + return (NULL); +} + +char* +bcmstrcat(char *dest, const char *src) +{ + char *p; + + p = dest + strlen(dest); + + while ((*p++ = *src++) != '\0') + ; + + return (dest); +} + +char* +bcmstrncat(char *dest, const char *src, uint size) +{ + char *endp; + char *p; + + p = dest + strlen(dest); + endp = p + size; + + while (p != endp && (*p++ = *src++) != '\0') + ; + + return (dest); +} + + +/**************************************************************************** +* Function: bcmstrtok +* +* Purpose: +* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), +* but allows strToken() to be used by different strings or callers at the same +* time. Each call modifies '*string' by substituting a NULL character for the +* first delimiter that is encountered, and updates 'string' to point to the char +* after the delimiter. Leading delimiters are skipped. +* +* Parameters: +* string (mod) Ptr to string ptr, updated by token. +* delimiters (in) Set of delimiter characters. +* tokdelim (out) Character that delimits the returned token. (May +* be set to NULL if token delimiter is not required). +* +* Returns: Pointer to the next token found. NULL when no more tokens are found. +***************************************************************************** +*/ +char * +bcmstrtok(char **string, const char *delimiters, char *tokdelim) +{ + unsigned char *str; + unsigned long map[8]; + int count; + char *nextoken; + + if (tokdelim != NULL) { + /* Prime the token delimiter */ + *tokdelim = '\0'; + } + + /* Clear control map */ + for (count = 0; count < 8; count++) { + map[count] = 0; + } + + /* Set bits in delimiter table */ + do { + map[*delimiters >> 5] |= (1 << (*delimiters & 31)); + } + while (*delimiters++); + + str = (unsigned char*)*string; + + /* Find beginning of token (skip over leading delimiters). Note that + * there is no token iff this loop sets str to point to the terminal + * null (*str == '\0') + */ + while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { + str++; + } + + nextoken = (char*)str; + + /* Find the end of the token. If it is not the end of the string, + * put a null there. + */ + for (; *str; str++) { + if (map[*str >> 5] & (1 << (*str & 31))) { + if (tokdelim != NULL) { + *tokdelim = *str; + } + + *str++ = '\0'; + break; + } + } + + *string = (char*)str; + + /* Determine if a token has been found. 
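+ * nextoken still equals str only when the scan above consumed no
+ * non-delimiter characters, i.e. the string held nothing but delimiters.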
*/ + if (nextoken == (char *) str) { + return NULL; + } + else { + return nextoken; + } +} + + +#define xToLower(C) \ + ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) + + +/**************************************************************************** +* Function: bcmstricmp +* +* Purpose: Compare to strings case insensitively. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstricmp(const char *s1, const char *s2) +{ + char dc, sc; + + while (*s2 && *s1) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + } + + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + + +/**************************************************************************** +* Function: bcmstrnicmp +* +* Purpose: Compare to strings case insensitively, upto a max of 'cnt' +* characters. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* cnt (in) Max characters to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstrnicmp(const char* s1, const char* s2, int cnt) +{ + char dc, sc; + + while (*s2 && *s1 && cnt) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + cnt--; + } + + if (!cnt) return 0; + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + +/* parse a xx:xx:xx:xx:xx:xx format ethernet address */ +int +bcm_ether_atoe(char *p, struct ether_addr *ea) +{ + int i = 0; + + for (;;) { + ea->octet[i++] = (char) bcm_strtoul(p, &p, 16); + if (!*p++ || i == 6) + break; + } + + return (i == 6); +} + + +#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) +/* registry routine buffer preparation utility functions: + * parameter order is like strncpy, but returns count + * of bytes copied. 
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +char * +bcm_ether_ntoa(const struct ether_addr *ea, char *buf) +{ + static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x"; + snprintf(buf, 18, template, + ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff, + ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff); + return (buf); +} + +char * +bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) +{ + snprintf(buf, 16, "%d.%d.%d.%d", + ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); + return (buf); +} + +#ifdef BCMDRIVER + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + + + + + + +#if defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. + */ +uint +pktsetprio(void *pkt, bool update_vtag) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata; + int priority = 0; + int rc = 0; + + pktdata = (uint8 *) PKTDATA(NULL, pkt); + ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + + eh = (struct ether_header *) pktdata; + + if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) { + uint16 vlan_tag; + int vlan_prio, dscp_prio = 0; + + evh = (struct ethervlan_header *)eh; + + vlan_tag = ntoh16(evh->vlan_tag); + vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + + if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) { + uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); + uint8 tos_tc = IP_TOS(ip_body); + dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } + + /* DSCP priority gets precedence over 802.1P (vlan tag) */ + if (dscp_prio != 0) { + priority = dscp_prio; + rc |= PKTPRIO_VDSCP; + } else { + priority = vlan_prio; + rc |= PKTPRIO_VLAN; + } + /* + * If the DSCP priority is not the same as the VLAN priority, + * then overwrite the priority field in the vlan tag, with the + * DSCP priority value. 
This is required for Linux APs because + * the VLAN driver on Linux, overwrites the skb->priority field + * with the priority value in the vlan tag + */ + if (update_vtag && (priority != vlan_prio)) { + vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); + vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; + evh->vlan_tag = hton16(vlan_tag); + rc |= PKTPRIO_UPD; + } + } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + uint8 *ip_body = pktdata + sizeof(struct ether_header); + uint8 tos_tc = IP_TOS(ip_body); + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + rc |= PKTPRIO_DSCP; + } + + ASSERT(priority >= 0 && priority <= MAXPRIO); + PKTSETPRIO(pkt, priority); + return (rc | priority); +} + +static char bcm_undeferrstr[BCME_STRLEN]; + +static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; + +/* Convert the error codes into related error strings */ +const char * +bcmerrorstr(int bcmerror) +{ + /* check if someone added a bcmerror code but forgot to add errorstring */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + + if (bcmerror > 0 || bcmerror < BCME_LAST) { + snprintf(bcm_undeferrstr, BCME_STRLEN, "Undefined error %d", bcmerror); + return bcm_undeferrstr; + } + + ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); + + return bcmerrorstrtable[-bcmerror]; +} + + + +/* iovar table lookup */ +const bcm_iovar_t* +bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) +{ + const bcm_iovar_t *vi; + const char *lookup_name; + + /* skip any ':' delimited option prefixes */ + lookup_name = strrchr(name, ':'); + if (lookup_name != NULL) + lookup_name++; + else + lookup_name = name; + + ASSERT(table != NULL); + + for (vi = table; vi->name; vi++) { + if (!strcmp(vi->name, lookup_name)) + return vi; + } + /* ran to end of table */ + + return NULL; /* var name not found */ +} + +int +bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) +{ + int bcmerror = 0; + + /* length check on io buf */ + switch (vi->type) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_INT16: + case IOVT_INT32: + case IOVT_UINT8: + case IOVT_UINT16: + case IOVT_UINT32: + /* all integers are int32 sized args at the ioctl interface */ + if (len < (int)sizeof(int)) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_BUFFER: + /* buffer must meet minimum length requirement */ + if (len < vi->minlen) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_VOID: + if (!set) { + /* Cannot return nil... */ + bcmerror = BCME_UNSUPPORTED; + } else if (len) { + /* Set is an action w/o parameters */ + bcmerror = BCME_BUFTOOLONG; + } + break; + + default: + /* unknown type for length check in iovar info */ + ASSERT(0); + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +#endif /* BCMDRIVER */ + +/******************************************************************************* + * crc8 + * + * Computes a crc8 over the input data using the polynomial: + * + * x^8 + x^7 +x^6 + x^4 + x^2 + 1 + * + * The caller provides the initial value (either CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC8_GOOD_VALUE indicates a valid CRC. 
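+ *
+ * Illustrative usage over two discontiguous blocks (the buffer and length
+ * names here are placeholders):
+ *
+ *   uint8 crc = CRC8_INIT_VALUE;
+ *   crc = hndcrc8(hdr, hdrlen, crc);
+ *   crc = hndcrc8(body, bodylen, crc);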
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +STATIC const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( + uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. 
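+ *
+ * Usage mirrors hndcrc8() above, e.g. (illustrative):
+ *
+ *   uint16 crc = hndcrc16(buf, len, CRC16_INIT_VALUE);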
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +STATIC const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +uint32 +hndcrc32( + uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint32 crc /* either CRC32_INIT_VALUE or previous return value */ +) +{ + uint8 *pend; +#ifdef __mips__ + uint8 tmp[4]; + ulong *tptr = (ulong *)tmp; + + /* in case the beginning of the buffer isn't aligned */ + pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc); + nbytes -= (pend - pdata); + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); + + /* handle bulk of data as 32-bit words */ + pend = pdata + (nbytes & 0xfffffffc); + while (pdata < pend) { + *tptr = *(ulong *)pdata; + pdata += sizeof(ulong *); + CRC_INNER_LOOP(32, crc, tmp[0]); + CRC_INNER_LOOP(32, crc, tmp[1]); + CRC_INNER_LOOP(32, crc, tmp[2]); + CRC_INNER_LOOP(32, crc, tmp[3]); + } + + /* 1-3 bytes at end of buffer */ + pend = pdata + (nbytes & 0x03); + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); +#else + pend = pdata + nbytes; + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); +#endif /* __mips__ */ + + 
return crc; +} + +#ifdef notdef +#define CLEN 1499 /* CRC Length */ +#define CBUFSIZ (CLEN+4) +#define CNBUFS 5 /* # of bufs */ + +void testcrc32(void) +{ + uint j, k, l; + uint8 *buf; + uint len[CNBUFS]; + uint32 crcr; + uint32 crc32tv[CNBUFS] = + {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; + + ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); + + /* step through all possible alignments */ + for (l = 0; l <= 4; l++) { + for (j = 0; j < CNBUFS; j++) { + len[j] = CLEN; + for (k = 0; k < len[j]; k++) + *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; + } + + for (j = 0; j < CNBUFS; j++) { + crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); + ASSERT(crcr == crc32tv[j]); + } + } + + MFREE(buf, CBUFSIZ*CNBUFS); + return; +} +#endif /* notdef */ + +/* + * Advance from the current 1-byte tag/1-byte length/variable-length value + * triple, to the next, returning a pointer to the next. + * If the current or next TLV is invalid (does not fit in given buffer length), + * NULL is returned. + * *buflen is not modified if the TLV elt parameter is invalid, or is decremented + * by the TLV parameter's length if it is valid. + */ +bcm_tlv_t * +bcm_next_tlv(bcm_tlv_t *elt, int *buflen) +{ + int len; + + /* validate current elt */ + if (!bcm_valid_tlv(elt, *buflen)) + return NULL; + + /* advance to next elt */ + len = elt->len; + elt = (bcm_tlv_t*)(elt->data + len); + *buflen -= (2 + len); + + /* validate next elt */ + if (!bcm_valid_tlv(elt, *buflen)) + return NULL; + + return elt; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +bcm_tlv_t * +bcm_parse_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= 2) { + int len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (len + 2))) + return (elt); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); + totlen -= (len + 2); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. 
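+ *
+ * Example (illustrative buffer, not taken from the driver): with
+ *   uint8 buf[] = { 0x00, 0x04, 's', 's', 'i', 'd', 0x01, 0x01, 0x82 };
+ * bcm_parse_ordered_tlvs(buf, sizeof(buf), 1) skips the id-0 element
+ * (2 header bytes + 4 value bytes) and returns a pointer to the id-1
+ * element at offset 6; a buffer whose first id already exceeds the key
+ * makes the routine return NULL without looking any further.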
+ */ +bcm_tlv_t * +bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= 2) { + uint id = elt->id; + int len = elt->len; + + /* Punt if we start seeing IDs > than target key */ + if (id > key) + return (NULL); + + /* validate remaining totlen */ + if ((id == key) && (totlen >= (len + 2))) + return (elt); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); + totlen -= (len + 2); + } + return NULL; +} + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) +int +bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) +{ + int i; + char* p = buf; + char hexstr[16]; + int slen = 0; + uint32 bit; + const char* name; + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + len -= 1; + + for (i = 0; flags != 0; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (bit == 0 && flags) { + /* print any unnamed bits */ + sprintf(hexstr, "0x%X", flags); + name = hexstr; + flags = 0; /* exit loop */ + } else if ((flags & bit) == 0) + continue; + slen += strlen(name); + if (len < slen) + break; + if (p != buf) p += sprintf(p, " "); /* btwn flag space */ + strcat(p, name); + p += strlen(name); + flags &= ~bit; + len -= slen; + slen = 1; /* account for btwn flag space */ + } + + /* indicate the str was too short */ + if (flags != 0) { + if (len == 0) + p--; /* overwrite last char */ + p += sprintf(p, ">"); + } + + return (int)(p - buf); +} + +/* print bytes formatted as hex to a string. return the resulting string length */ +int +bcm_format_hex(char *str, const void *bytes, int len) +{ + int i; + char *p = str; + const uint8 *src = (const uint8*)bytes; + + for (i = 0; i < len; i++) { + p += sprintf(p, "%02X", *src); + src++; + } + return (int)(p - str); +} + +/* pretty hex print a contiguous buffer */ +void +prhex(const char *msg, uchar *buf, uint nbytes) +{ + char line[128], *p; + uint i; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + p += sprintf(p, " %04d: ", i); /* line prefix */ + } + p += sprintf(p, "%02x ", buf[i]); + if (i % 16 == 15) { + printf("%s\n", line); /* flush line */ + p = line; + } + } + + /* flush last partial line */ + if (p != line) + printf("%s\n", line); +} +#endif + + +/* Produce a human-readable string for boardrev */ +char * +bcm_brev_str(uint32 brev, char *buf) +{ + if (brev < 0x100) + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + else + snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 
'P' : 'A', brev & 0xfff); + + return (buf); +} + +#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ + +/* dump large strings to console */ +void +printbig(char *buf) +{ + uint len, max_len; + char c; + + len = strlen(buf); + + max_len = BUFSIZE_TODUMP_ATONCE; + + while (len > max_len) { + c = buf[max_len]; + buf[max_len] = '\0'; + printf("%s", buf); + buf[max_len] = c; + + buf += max_len; + len -= max_len; + } + /* print the remaining string */ + printf("%s\n", buf); + return; +} + +/* routine to dump fields in a fileddesc structure */ +uint +bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, + char *buf, uint32 bufsize) +{ + uint filled_len; + int len; + struct fielddesc *cur_ptr; + + filled_len = 0; + cur_ptr = fielddesc_array; + + while (bufsize > 1) { + if (cur_ptr->nameandfmt == NULL) + break; + len = snprintf(buf, bufsize, cur_ptr->nameandfmt, + read_rtn(arg0, arg1, cur_ptr->offset)); + /* check for snprintf overflow or error */ + if (len < 0 || (uint32)len >= bufsize) + len = bufsize - 1; + buf += len; + bufsize -= len; + filled_len += len; + cur_ptr++; + } + return filled_len; +} + +uint +bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) +{ + uint len; + + len = strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + strncpy(buf, name, buflen); + + /* append data onto the end of the name string */ + memcpy(&buf[len], data, datalen); + len += datalen; + + return len; +} + +/* Quarter dBm units to mW + * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 + * Table is offset so the last entry is largest mW value that fits in + * a uint16. + */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. 
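+	 *
+	 * Worked example: qdbm = 68 (17 dBm) gives idx = 68 - 153 = -85;
+	 * three passes of the loop above leave idx = 35 and factor = 1000,
+	 * so the result is (50119 + 500) / 1000 = 50 mW, which matches
+	 * 10^(17/10) to within rounding.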
+ */ + return ((nqdBm_to_mW_map[idx] + factor/2) / factor); +} + +uint8 +bcm_mw_to_qdbm(uint16 mw) +{ + uint8 qdbm; + int offset; + uint mw_uint = mw; + uint boundary; + + /* handle boundary case */ + if (mw_uint <= 1) + return 0; + + offset = QDBM_OFFSET; + + /* move mw into the range of the table */ + while (mw_uint < QDBM_TABLE_LOW_BOUND) { + mw_uint *= 10; + offset -= 40; + } + + for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { + boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - + nqdBm_to_mW_map[qdbm])/2; + if (mw_uint < boundary) break; + } + + qdbm += (uint8)offset; + + return (qdbm); +} + + +uint +bcm_bitcount(uint8 *bitmap, uint length) +{ + uint bitcount = 0, i; + uint8 tmp; + for (i = 0; i < length; i++) { + tmp = bitmap[i]; + while (tmp) { + bitcount++; + tmp &= (tmp - 1); + } + } + return bitcount; +} + +#ifdef BCMDRIVER + +/* Initialization of bcmstrbuf structure */ +void +bcm_binit(struct bcmstrbuf *b, char *buf, uint size) +{ + b->origsize = b->size = size; + b->origbuf = b->buf = buf; +} + +/* Buffer sprintf wrapper to guard against buffer overflow */ +int +bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) +{ + va_list ap; + int r; + + va_start(ap, fmt); + r = vsnprintf(b->buf, b->size, fmt, ap); + + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + if ((r == -1) || (r >= (int)b->size) || (r == 0)) { + b->size = 0; + } else { + b->size -= r; + b->buf += r; + } + + va_end(ap); + + return r; +} + +void +bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) +{ + int i; + + for (i = 0; i < num_bytes; i++) { + num[i] += amount; + if (num[i] >= amount) + break; + amount = 1; + } +} + +int +bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes) +{ + int i; + + for (i = nbytes - 1; i >= 0; i--) { + if (arg1[i] != arg2[i]) + return (arg1[i] - arg2[i]); + } + return 0; +} + +void +bcm_print_bytes(char *name, const uchar *data, int len) +{ + int i; + int per_line = 0; + + printf("%s: %d \n", name ? name : "", len); + for (i = 0; i < len; i++) { + printf("%02x ", *data++); + per_line++; + if (per_line == 16) { + per_line = 0; + printf("\n"); + } + } + printf("\n"); +} + +/* + * buffer length needed for wlc_format_ssid + * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL. + */ + +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +int +bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) +{ + uint i, c; + char *p = buf; + char *endp = buf + SSID_FMT_BUF_LEN; + + if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; + + for (i = 0; i < ssid_len; i++) { + c = (uint)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (bcm_isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += snprintf(p, (endp - p), "\\x%02X", c); + } + } + *p = '\0'; + ASSERT(p < endp); + + return (int)(p - buf); +} +#endif + +#endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/bcm4329/bcmwifi.c b/drivers/net/wireless/bcm4329/bcmwifi.c new file mode 100644 index 000000000000..803acf842a29 --- /dev/null +++ b/drivers/net/wireless/bcm4329/bcmwifi.c @@ -0,0 +1,199 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. 
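+ *
+ * (In this file: chanspec <-> ASCII helpers wf_chspec_ntoa() and
+ * wf_chspec_aton(), plus the channel <-> frequency converters
+ * wf_mhz2channel() and wf_channel2mhz().)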
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: bcmwifi.c,v 1.18.24.2.4.1 2009/09/25 00:32:01 Exp $ + */ + + +#include <typedefs.h> + +#ifdef BCMDRIVER +#include <osl.h> +#include <bcmutils.h> +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else +#include <stdio.h> +#include <stdlib.h> +#include <ctype.h> +#endif +#include <bcmwifi.h> + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include <bcmstdlib.h> +#endif + + + + + +char * +wf_chspec_ntoa(chanspec_t chspec, char *buf) +{ + const char *band, *bw, *sb; + uint channel; + + band = ""; + bw = ""; + sb = ""; + channel = CHSPEC_CHANNEL(chspec); + + if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) || + (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL)) + band = (CHSPEC_IS2G(chspec)) ? "b" : "a"; + if (CHSPEC_IS40(chspec)) { + if (CHSPEC_SB_UPPER(chspec)) { + sb = "u"; + channel += CH_10MHZ_APART; + } else { + sb = "l"; + channel -= CH_10MHZ_APART; + } + } else if (CHSPEC_IS10(chspec)) { + bw = "n"; + } + + + snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb); + return (buf); +} + + +chanspec_t +wf_chspec_aton(char *a) +{ + char *endp = NULL; + uint channel, band, bw, ctl_sb; + char c; + + channel = strtoul(a, &endp, 10); + + + if (endp == a) + return 0; + + if (channel > MAXCHANNEL) + return 0; + + band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + bw = WL_CHANSPEC_BW_20; + ctl_sb = WL_CHANSPEC_CTL_SB_NONE; + + a = endp; + + c = tolower(a[0]); + if (c == '\0') + goto done; + + + if (c == 'a' || c == 'b') { + band = (c == 'a') ? 
WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G; + a++; + c = tolower(a[0]); + if (c == '\0') + goto done; + } + + + if (c == 'n') { + bw = WL_CHANSPEC_BW_10; + } else if (c == 'l') { + bw = WL_CHANSPEC_BW_40; + ctl_sb = WL_CHANSPEC_CTL_SB_LOWER; + + if (channel <= (MAXCHANNEL - CH_20MHZ_APART)) + channel += CH_10MHZ_APART; + else + return 0; + } else if (c == 'u') { + bw = WL_CHANSPEC_BW_40; + ctl_sb = WL_CHANSPEC_CTL_SB_UPPER; + + if (channel > CH_20MHZ_APART) + channel -= CH_10MHZ_APART; + else + return 0; + } else { + return 0; + } + +done: + return (channel | band | bw | ctl_sb); +} + + +int +wf_mhz2channel(uint freq, uint start_factor) +{ + int ch = -1; + uint base; + int offset; + + + if (start_factor == 0) { + if (freq >= 2400 && freq <= 2500) + start_factor = WF_CHAN_FACTOR_2_4_G; + else if (freq >= 5000 && freq <= 6000) + start_factor = WF_CHAN_FACTOR_5_G; + } + + if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) + return 14; + + base = start_factor / 2; + + + if ((freq < base) || (freq > base + 1000)) + return -1; + + offset = freq - base; + ch = offset / 5; + + + if (offset != (ch * 5)) + return -1; + + + if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13)) + return -1; + + return ch; +} + + +int +wf_channel2mhz(uint ch, uint start_factor) +{ + int freq; + + if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) || + (ch <= 200)) + freq = -1; + if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) + freq = 2484; + else + freq = ch * 5 + start_factor / 2; + + return freq; +} diff --git a/drivers/net/wireless/bcm4329/dhd.h b/drivers/net/wireless/bcm4329/dhd.h new file mode 100644 index 000000000000..cc7e8cb286b8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd.h @@ -0,0 +1,423 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd.h,v 1.32.4.7.2.4.14.49 2010/08/20 17:32:48 Exp $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_h_ +#define _dhd_h_ + +#if defined(LINUX) +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/random.h> +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <asm/uaccess.h> +#include <asm/unaligned.h> + +/* The kernel threading is sdio-specific */ +#else /* LINUX */ +#define ENOMEM 1 +#define EFAULT 2 +#define EINVAL 3 +#define EIO 4 +#define ETIMEDOUT 5 +#define ERESTARTSYS 6 +#endif /* LINUX */ + +#include <wlioctl.h> + +#ifdef DHD_DEBUG +#ifndef DHD_DEBUG_TRAP +#define DHD_DEBUG_TRAP +#endif +#endif + +/* Forward decls */ +struct dhd_bus; +struct dhd_prot; +struct dhd_info; + +/* The level of bus communication with the dongle */ +enum dhd_bus_state { + DHD_BUS_DOWN, /* Not ready for frame transfers */ + DHD_BUS_LOAD, /* Download access only (CPU reset) */ + DHD_BUS_DATA /* Ready for frame transfers */ +}; + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF, + WAKE_LOCK_PRIV, + WAKE_LOCK_DPC, + WAKE_LOCK_IOCTL, + WAKE_LOCK_DOWNLOAD, + WAKE_LOCK_TMOUT, + WAKE_LOCK_WATCHDOG, + WAKE_LOCK_LINK_DOWN_TMOUT, + WAKE_LOCK_SOFTAP_SET, + WAKE_LOCK_SOFTAP_STOP, + WAKE_LOCK_SOFTAP_START, + WAKE_LOCK_MAX +}; +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF +}; +#ifdef DHD_USE_STATIC_BUF +extern void * dhd_os_prealloc(int section, unsigned long size); +#endif +/* Common structure for module and instance linkage */ +typedef struct dhd_pub { + /* Linkage ponters */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ + + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + + ulong rx_readahead_cnt; /* Number of packets where header read-ahead was 
used. */ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ +#endif /* PNO_SUPPORT */ + int dtim_skip; /* dtim skip , default 0 means wake each dtim */ + + /* Pkt filter defination */ + char * pktfilter[100]; + int pktfilter_count; + + uint8 country_code[WLC_CNTRY_BUF_SZ]; + char eventmask[WL_EVENTING_MASK_LEN]; + +} dhd_pub_t; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + + #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define _DHD_PM_RESUME_WAIT(a, b) do { \ + int retry = 0; \ + smp_mb(); \ + while (dhd_mmc_suspend && retry++ != b) { \ + wait_event_interruptible_timeout(a, FALSE, HZ/100); \ + } \ + } while (0) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 30) + #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) + #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0) + #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) + + #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9999; \ + while ((exp) && (countdown >= 10000)) { \ + wait_event_interruptible_timeout(a, FALSE, HZ/100); \ + countdown -= 10000; \ + } \ + } while (0) + +#else + + #define DHD_PM_RESUME_WAIT_INIT(a) + #define DHD_PM_RESUME_WAIT(a) + #define DHD_PM_RESUME_WAIT_FOREVER(a) + #define DHD_PM_RESUME_RETURN_ERROR(a) + #define DHD_PM_RESUME_RETURN + + #define DHD_SPINWAIT_SLEEP_INIT(a) + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) { \ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ + } while (0) + +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ + +/* Wakelock Functions */ +extern int dhd_os_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub); + +typedef struct dhd_if_event { + uint8 ifidx; + uint8 action; + uint8 flags; + uint8 bssidx; +} dhd_if_event_t; + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* To allow osl_attach/detach calls from os-independent modules */ +osl_t *dhd_osl_attach(void *pdev, uint bustype); +void dhd_osl_detach(osl_t *osh); + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. 
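+ *
+ * Rough probe-time sequence as seen from a bus driver (a sketch only;
+ * the exact ordering is bus-specific): dhd_attach() to create the
+ * handle, dhd_bus_start() once firmware/nvram are available, then
+ * dhd_net_attach() to register the network interface with the OS.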
+ */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen); +extern int dhd_net_attach(dhd_pub_t *dhdp, int idx); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +/* Request scheduling of the bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); + +/* Query ioctl */ +extern int dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len); + +/* OS independent layer functions */ +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void * dhd_os_open_image(char * filename); +extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern void dhd_os_close_image(void * image); +extern void dhd_os_wd_timer(void *bus, uint wdtick); +extern void dhd_os_sdlock(dhd_pub_t * pub); +extern void dhd_os_sdunlock(dhd_pub_t * pub); +extern void dhd_os_sdlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_customer_gpio_wlan_ctrl(int onoff); +extern int dhd_custom_get_mac_address(unsigned char *buf); +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +#ifdef DHD_DEBUG +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); +#endif /* DHD_DEBUG */ +#if defined(OOB_INTR_ONLY) +extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr); +#endif /* defined(OOB_INTR_ONLY) */ +extern void dhd_os_sdtxlock(dhd_pub_t * pub); +extern void dhd_os_sdtxunlock(dhd_pub_t * pub); + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param); + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +extern uint8 *dhd_bssidx2bssid(dhd_pub_t *dhd, int idx); +extern int wl_host_event(struct dhd_info *dhd, int *idx, void *pktdata, + wl_event_msg_t *, void **data_ptr); +extern void wl_event_to_host_order(wl_event_msg_t * evt); + +extern void dhd_common_init(void); + +extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle, + char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx); +extern void dhd_del_if(struct dhd_info *dhd, int 
ifidx); + +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); + +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + + +/* Send packet to dongle via data channel */ +extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt); + +/* Send event to host */ +extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag); +extern uint dhd_bus_status(dhd_pub_t *dhdp); +extern int dhd_bus_start(dhd_pub_t *dhdp); + +extern void print_buf(void *pbuf, int len, int bytes_per_line); + + +typedef enum cust_gpio_modes { + WLAN_RESET_ON, + WLAN_RESET_OFF, + WLAN_POWER_ON, + WLAN_POWER_OFF +} cust_gpio_modes_t; +extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); +/* + * Insmod parameters for debug/test + */ + +/* Watchdog timer interval */ +extern uint dhd_watchdog_ms; + +#if defined(DHD_DEBUG) +/* Console output poll interval */ +extern uint dhd_console_ms; +#endif /* defined(DHD_DEBUG) */ + +/* Use interrupts */ +extern uint dhd_intr; + +/* Use polling */ +extern uint dhd_poll; + +/* ARP offload agent mode */ +extern uint dhd_arp_mode; + +/* ARP offload enable */ +extern uint dhd_arp_enable; + +/* Pkt filte enable control */ +extern uint dhd_pkt_filter_enable; + +/* Pkt filter init setup */ +extern uint dhd_pkt_filter_init; + +/* Pkt filter mode control */ +extern uint dhd_master_mode; + +/* Roaming mode control */ +extern uint dhd_roam; + +/* Roaming mode control */ +extern uint dhd_radio_up; + +/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ +extern int dhd_idletime; +#define DHD_IDLETIME_TICKS 1 + +/* SDIO Drive Strength */ +extern uint dhd_sdiod_drive_strength; + +/* Override to force tx queueing all the time */ +extern uint dhd_force_tx_queueing; + +#ifdef SDTEST +/* Echo packet generator (SDIO), pkts/s */ +extern uint dhd_pktgen; + +/* Echo packet len (0 => sawtooth, max 1800) */ +extern uint dhd_pktgen_len; +#define MAX_PKTGEN_LEN 1800 +#endif + + +/* optionally set by a module_param_string() */ +#define MOD_PARAM_PATHLEN 2048 +extern char fw_path[MOD_PARAM_PATHLEN]; +extern char nv_path[MOD_PARAM_PATHLEN]; + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#define DHD_DEL_IF -0xe +#define DHD_BAD_IF -0xf + + +extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); +extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); + +#endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcm4329/dhd_bus.h b/drivers/net/wireless/bcm4329/dhd_bus.h new file mode 100644 index 000000000000..9e29fb955444 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_bus.h @@ -0,0 +1,93 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_bus.h,v 1.4.6.3.2.3.6.6 2010/05/17 18:18:13 Exp $ + */ + +#ifndef _dhd_bus_h_ +#define _dhd_bus_h_ + +/* + * Exported from dhd bus module (dhd_usb, dhd_sdio) + */ + +/* Indicate (dis)interest in finding dongles. */ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *fw_path, char *nv_path); + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Send a data frame to the dongle. Callee disposes of txp. */ +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. + */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); + +#ifdef DHD_DEBUG +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); +#endif + +/* Deferred processing for the bus, return TRUE requests reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. 
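+ * (These are presumably the text nvram contents handed to the dongle
+ * as part of dhd_bus_download_firmware(); see the download path in the
+ * bus module.)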
*/ +extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params); + +extern void *dhd_bus_pub(struct dhd_bus *bus); +extern void *dhd_bus_txq(struct dhd_bus *bus); +extern uint dhd_bus_hdrlen(struct dhd_bus *bus); + +#endif /* _dhd_bus_h_ */ diff --git a/drivers/net/wireless/bcm4329/dhd_cdc.c b/drivers/net/wireless/bcm4329/dhd_cdc.c new file mode 100644 index 000000000000..61f6a6f393a9 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_cdc.c @@ -0,0 +1,522 @@ +/* + * DHD Protocol Module for CDC and BDC. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_cdc.c,v 1.22.4.2.4.7.2.41 2010/06/23 19:58:18 Exp $ + * + * BDC is like CDC, except it includes a header for data packets to convey + * packet priority over the bus, and flags (e.g. to indicate checksum status + * for dongle offload). + */ + +#include <typedefs.h> +#include <osl.h> + +#include <bcmutils.h> +#include <bcmcdc.h> +#include <bcmendian.h> + +#include <dngl_stats.h> +#include <dhd.h> +#include <dhd_proto.h> +#include <dhd_bus.h> +#include <dhd_dbg.h> + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! +#endif + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ +#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be atleast SDPCM_RESERVE + * defined in dhd_sdio.c (amount of header tha might be added) + * plus any space that might be needed for alignment padding. + */ +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for + * round off at the end of buffer + */ + +typedef struct dhd_prot { + uint16 reqid; + uint8 pending; + uint32 lastcmd; + uint8 bus_header[BUS_HEADER_LEN]; + cdc_ioctl_t msg; + unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN]; +} dhd_prot_t; + +static int +dhdcdc_msg(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t); + int ret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_wake_lock(dhd); + + /* NOTE : cdc->msg.len holds the desired length of the buffer to be + * returned. 
Only up to CDC_MAX_MSG_SIZE of this buffer area + * is actually sent to the dongle + */ + if (len > CDC_MAX_MSG_SIZE) + len = CDC_MAX_MSG_SIZE; + + /* Send request */ + ret = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len); + dhd_os_wake_unlock(dhd); + return ret; +} + +static int +dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) +{ + int ret; + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + do { + ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, len+sizeof(cdc_ioctl_t)); + if (ret < 0) + break; + } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); + + return ret; +} + +int +dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + void *info; + int ret = 0, retries = 0; + uint32 id, flags = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if ((id < prot->reqid) && (++retries < RETRIES)) + goto retry; + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check info buffer */ + info = (void*)&msg[1]; + + /* Copy info buffer */ + if (buf) + { + if (ret < (int)len) + len = ret; + memcpy(buf, info, len); + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +int +dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0; + uint32 flags, id; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET; + CDC_SET_IF_IDX(msg, ifidx); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) + goto done; + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from 
dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +extern int dhd_bus_interface(struct dhd_bus *bus, uint arg, void* arg2); +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return ret; + } + dhd_os_proto_block(dhd); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + if (ioc->set) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + dhd_os_proto_unblock(dhd); + + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +} + + +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + + PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(pktbuf)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->rssi = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} + + +bool +dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits) +{ +#ifdef BDC + struct bdc_header *h; + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", + __FUNCTION__, PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + *fcbits = h->priority >> BDC_PRIORITY_FC_SHIFT; + if ((h->flags2 & BDC_FLAG2_FC_FLAG) == BDC_FLAG2_FC_FLAG) + return TRUE; +#endif + return FALSE; +} + + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf) +{ +#ifdef BDC + struct bdc_header *h; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + 
PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) { + DHD_ERROR(("%s: rx data ifnum out of range (%d)\n", + __FUNCTION__, *ifidx)); + return BCME_ERROR; + } + + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* BDC */ + + return 0; +} + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + +#ifndef DHD_USE_STATIC_BUF + if (!(cdc = (dhd_prot_t *)MALLOC(dhd->osh, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } +#else + if (!(cdc = (dhd_prot_t *)dhd_os_prealloc(DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } +#endif /* DHD_USE_STATIC_BUF */ + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: +#ifndef DHD_USE_STATIC_BUF + if (cdc != NULL) + MFREE(dhd->osh, cdc, sizeof(dhd_prot_t)); +#endif + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifndef DHD_USE_STATIC_BUF + MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); +#endif + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* No stats from dongle added yet, copy bus stats */ + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_prot_init(dhd_pub_t *dhd) +{ + int ret = 0; + char buf[128]; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_proto_block(dhd); + + /* Get the device MAC address */ + strcpy(buf, "cur_etheraddr"); + ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf)); + if (ret < 0) { + dhd_os_proto_unblock(dhd); + return ret; + } + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); + + dhd_os_proto_unblock(dhd); + +#ifdef EMBEDDED_PLATFORM + ret = dhd_preinit_ioctls(dhd); +#endif /* EMBEDDED_PLATFORM */ + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + + return ret; +} + +void +dhd_prot_stop(dhd_pub_t *dhd) +{ + /* Nothing to do for CDC */ +} diff --git a/drivers/net/wireless/bcm4329/dhd_common.c b/drivers/net/wireless/bcm4329/dhd_common.c new file mode 100644 index 000000000000..bea33b6d29af --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_common.c @@ -0,0 +1,2183 @@ +/* + * Broadcom Dongle Host Driver (DHD), common DHD core. 
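+ *
+ * (Bus- and OS-independent pieces: the iovar/ioctl dispatch tables,
+ * precedence queueing for tx packets, and the dongle event decoding
+ * used by the OS glue and bus layers.)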
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.3 2010/09/10 21:30:16 Exp $ + */ +#include <typedefs.h> +#include <osl.h> + +#include <epivers.h> +#include <bcmutils.h> + +#include <bcmendian.h> +#include <dngl_stats.h> +#include <dhd.h> +#include <dhd_bus.h> +#include <dhd_proto.h> +#include <dhd_dbg.h> +#include <msgtrace.h> + +#include <wlioctl.h> + +#ifdef SET_RANDOM_MAC_SOFTAP +#include <linux/random.h> +#include <linux/jiffies.h> +#endif + +#ifdef GET_CUSTOM_MAC_ENABLE +int wifi_get_mac_addr(unsigned char *buf); +#endif /* GET_CUSTOM_MAC_ENABLE */ + +int dhd_msg_level; + +#include <wl_iw.h> + +char fw_path[MOD_PARAM_PATHLEN]; +char nv_path[MOD_PARAM_PATHLEN]; + +/* Last connection success/failure status */ +uint32 dhd_conn_event; +uint32 dhd_conn_status; +uint32 dhd_conn_reason; + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i + +extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len); +extern void dhd_ind_scan_confirm(void *h, bool status); +extern int dhd_wl_ioctl(dhd_pub_t *dhd, uint cmd, char *buf, uint buflen); +void dhd_iscan_lock(void); +void dhd_iscan_unlock(void); + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! 
+#endif + +#ifdef DHD_DEBUG +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on " + __DATE__ " at " __TIME__; +#else +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; +#endif + +void dhd_set_timer(void *bus, uint wdtick); + +/* IOVar table */ +enum { + IOV_VERSION = 1, + IOV_MSGLEVEL, + IOV_BCMERRORSTR, + IOV_BCMERROR, + IOV_WDTICK, + IOV_DUMP, +#ifdef DHD_DEBUG + IOV_CONS, + IOV_DCONSOLE_POLL, +#endif + IOV_CLEARCOUNTS, + IOV_LOGDUMP, + IOV_LOGCAL, + IOV_LOGSTAMP, + IOV_GPIOOB, + IOV_IOCTLTIMEOUT, + IOV_LAST +}; + +const bcm_iovar_t dhd_iovars[] = { + {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) }, +#ifdef DHD_DEBUG + {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ + {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN }, + {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 }, + {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 }, + {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, +#ifdef DHD_DEBUG + {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 }, + {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 }, +#endif + {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, + {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +void +dhd_common_init(void) +{ + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behaviour since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + dhd_msg_level = DHD_ERROR_VAL; +#ifdef CONFIG_BCM4329_FW_PATH + strncpy(fw_path, CONFIG_BCM4329_FW_PATH, MOD_PARAM_PATHLEN-1); +#else + fw_path[0] = '\0'; +#endif +#ifdef CONFIG_BCM4329_NVRAM_PATH + strncpy(nv_path, CONFIG_BCM4329_NVRAM_PATH, MOD_PARAM_PATHLEN-1); +#else + nv_path[0] = '\0'; +#endif +} + +static int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n", + dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n", + dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld 
rx_errors %ld \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld rx_flushed %ld\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped, dhdp->rx_flushed); + bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld fc_packets %ld\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc, dhdp->fc_packets); + bcm_bprintf(strbuf, "wd_dpc_sched %ld\n", dhdp->wd_dpc_sched); + bcm_bprintf(strbuf, "\n"); + + /* Add any prot info */ + dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); + + return (!strbuf->size ? BCME_BUFTOOSHORT : 0); +} + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + strncpy((char*)arg, dhd_version, len); + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + dhd_msg_level = int_val; + break; + + case IOV_GVAL(IOV_BCMERRORSTR): + strncpy((char *)arg, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + bcmerror = dhd_dump(dhd_pub, arg, len); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_console_ms = (uint)int_val; + break; + + case IOV_SVAL(IOV_CONS): + if (len > 0) + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); + break; +#endif + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); + break; + + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + return bcmerror; +} + +/* Store the status of a connection attempt for later retrieval by an iovar */ +void +dhd_store_conn_status(uint32 event, uint32 status, uint32 reason) +{ + /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID + * because an encryption/rsn mismatch results in both events, and + * the important information is in the WLC_E_PRUNE. 
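+	 * In that case the dongle first reports WLC_E_PRUNE (carrying the
+	 * prune reason) and then WLC_E_SET_SSID with WLC_E_STATUS_FAIL;
+	 * the check below keeps the earlier, more specific event.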
+ */ + if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL && + dhd_conn_event == WLC_E_PRUNE)) { + dhd_conn_event = event; + dhd_conn_status = status; + dhd_conn_reason = reason; + } +} + +bool +dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec) +{ + void *p; + int eprec = -1; /* precedence to evict from */ + bool discard_oldest; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(q, prec) && !pktq_full(q)) { + pktq_penq(q, prec, pkt); + return TRUE; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(q, prec)) + eprec = prec; + else if (pktq_full(q)) { + p = pktq_peek_tail(q, &eprec); + ASSERT(p); + if (eprec > prec) + return FALSE; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(q, eprec)); + discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec); + if (eprec == prec && !discard_oldest) + return FALSE; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec); + if (p == NULL) { + DHD_ERROR(("%s: pktq_penq() failed, oldest %d.", + __FUNCTION__, discard_oldest)); + ASSERT(p); + } + + PKTFREE(dhdp->osh, p, TRUE); + } + + /* Enqueue */ + p = pktq_penq(q, prec, pkt); + if (p == NULL) { + DHD_ERROR(("%s: pktq_penq() failed.", __FUNCTION__)); + ASSERT(p); + } + + return TRUE; +} + +static int +dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
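/*
 * The ternary below folds the get/set direction into a single action id so
 * that dhd_doiovar() can switch on one value per iovar.  A minimal sketch of
 * the encoding, assuming the usual bcmutils.h definitions (verify against
 * the headers shipped with this driver before relying on the arithmetic):
 *
 *     IOV_GVAL(id)  ->  (id) * 2             get variant, low bit clear
 *     IOV_SVAL(id)  ->  (id) * 2 + IOV_SET   set variant, low bit set
 *     IOV_ISSET(a)  ->  (a) & IOV_SET        recovers the direction
 *
 * so case IOV_GVAL(IOV_WDTICK) and case IOV_SVAL(IOV_WDTICK) land on
 * different switch labels even though both refer to the same table entry.
 */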
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +int +dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen) +{ + int bcmerror = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!buf) return BCME_BADARG; + + switch (ioc->cmd) { + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; + + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = -BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; + break; + + case DHD_GET_VAR: + case DHD_SET_VAR: { + char *arg; + uint arglen; + + /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--); + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + break; + } + + /* account for the NUL terminator */ + arg++, arglen--; + + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + else + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET); + if (bcmerror != BCME_UNSUPPORTED) + break; + + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + else + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + if (bcmerror != BCME_UNSUPPORTED) + break; + + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + else + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + + +#ifdef SHOW_EVENTS +static void +wl_show_host_event(wl_event_msg_t *event, void *event_data) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + char *auth_str, *event_name; + uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + static struct {uint event; char *event_name;} event_names[] = { + {WLC_E_SET_SSID, "SET_SSID"}, + {WLC_E_JOIN, "JOIN"}, + {WLC_E_START, "START"}, + {WLC_E_AUTH, "AUTH"}, + {WLC_E_AUTH_IND, "AUTH_IND"}, + {WLC_E_DEAUTH, "DEAUTH"}, + {WLC_E_DEAUTH_IND, "DEAUTH_IND"}, + {WLC_E_ASSOC, "ASSOC"}, + {WLC_E_ASSOC_IND, "ASSOC_IND"}, + {WLC_E_REASSOC, "REASSOC"}, + {WLC_E_REASSOC_IND, "REASSOC_IND"}, + {WLC_E_DISASSOC, "DISASSOC"}, + {WLC_E_DISASSOC_IND, "DISASSOC_IND"}, + {WLC_E_QUIET_START, "START_QUIET"}, + {WLC_E_QUIET_END, "END_QUIET"}, + {WLC_E_BEACON_RX, "BEACON_RX"}, + {WLC_E_LINK, "LINK"}, + {WLC_E_MIC_ERROR, "MIC_ERROR"}, + {WLC_E_NDIS_LINK, "NDIS_LINK"}, + {WLC_E_ROAM, "ROAM"}, + {WLC_E_TXFAIL, "TXFAIL"}, + {WLC_E_PMKID_CACHE, "PMKID_CACHE"}, + {WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, + {WLC_E_PRUNE, "PRUNE"}, + {WLC_E_AUTOAUTH, "AUTOAUTH"}, + {WLC_E_EAPOL_MSG, "EAPOL_MSG"}, + {WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, + {WLC_E_ADDTS_IND, "ADDTS_IND"}, + {WLC_E_DELTS_IND, "DELTS_IND"}, + {WLC_E_BCNSENT_IND, "BCNSENT_IND"}, + {WLC_E_BCNRX_MSG, "BCNRX_MSG"}, + {WLC_E_BCNLOST_MSG, "BCNLOST_MSG"}, + {WLC_E_ROAM_PREP, "ROAM_PREP"}, + {WLC_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, + {WLC_E_PFN_NET_LOST, "PNO_NET_LOST"}, + {WLC_E_RESET_COMPLETE, "RESET_COMPLETE"}, + {WLC_E_JOIN_START, "JOIN_START"}, + {WLC_E_ROAM_START, "ROAM_START"}, + {WLC_E_ASSOC_START, "ASSOC_START"}, + {WLC_E_IBSS_ASSOC, "IBSS_ASSOC"}, + 
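/*
 * For reference while reading dhd_ioctl() above: DHD_GET_VAR and DHD_SET_VAR
 * expect the caller's buffer to carry the NUL-terminated iovar name first,
 * immediately followed by the argument (set) or by space for the result
 * (get).  A minimal sketch of a get request for the "wdtick" iovar, assuming
 * a caller that already holds a dhd_ioctl_t (only the buffer layout is taken
 * from the code above, the surrounding caller is illustrative):
 *
 *     char buf[64];
 *     dhd_ioctl_t ioc;
 *     int err;
 *
 *     memset(buf, 0, sizeof(buf));
 *     strncpy(buf, "wdtick", sizeof(buf) - 1);   name, NUL terminated
 *     ioc.cmd = DHD_GET_VAR;
 *     err = dhd_ioctl(dhd_pub, &ioc, buf, sizeof(buf));
 *     on success the 32-bit value is read back from the start of buf
 *
 * dhd_ioctl() scans past the name, then tries the generic table, the
 * protocol module and finally the bus module until one of them claims it.
 */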
{WLC_E_RADIO, "RADIO"}, + {WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, + {WLC_E_PROBREQ_MSG, "PROBREQ_MSG"}, + {WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, + {WLC_E_PSK_SUP, "PSK_SUP"}, + {WLC_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, + {WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, + {WLC_E_ICV_ERROR, "ICV_ERROR"}, + {WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, + {WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, + {WLC_E_TRACE, "TRACE"}, + {WLC_E_ACTION_FRAME, "ACTION FRAME"}, + {WLC_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, + {WLC_E_IF, "IF"}, + {WLC_E_RSSI, "RSSI"}, + {WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"} + }; + uint event_type, flags, auth_type, datalen; + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + auth_type = ntoh32(event->auth_type); + datalen = ntoh32(event->datalen); + /* debug dump of event messages */ + sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x", + (uchar)event->addr.octet[0]&0xff, + (uchar)event->addr.octet[1]&0xff, + (uchar)event->addr.octet[2]&0xff, + (uchar)event->addr.octet[3]&0xff, + (uchar)event->addr.octet[4]&0xff, + (uchar)event->addr.octet[5]&0xff); + + event_name = "UNKNOWN"; + for (i = 0; i < ARRAYSIZE(event_names); i++) { + if (event_names[i].event == event_type) + event_name = event_names[i].event_name; + } + + DHD_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type)); + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", + event_name, eabuf, (int)reason)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason)); + break; + + case WLC_E_AUTH: + case WLC_E_AUTH_IND: + if (auth_type == DOT11_OPEN_SYSTEM) + auth_str = "Open System"; + else if (auth_type == DOT11_SHARED_KEY) + auth_str = "Shared Key"; + else { + sprintf(err_msg, "AUTH unknown: %d", (int)auth_type); + auth_str = err_msg; + } + if (event_type == WLC_E_AUTH_IND) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", + event_name, eabuf, auth_str, (int)reason)); + } + + break; + + case WLC_E_JOIN: + case WLC_E_ROAM: + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, 
eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } + break; + + case WLC_E_BEACON_RX: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status)); + } + break; + + case WLC_E_LINK: + DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN")); + break; + + case WLC_E_MIC_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n", + event_name, eabuf, group, flush_txq)); + break; + + case WLC_E_ICV_ERROR: + case WLC_E_UNICAST_DECODE_ERROR: + case WLC_E_MULTICAST_DECODE_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", + event_name, eabuf)); + break; + + case WLC_E_TXFAIL: + DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf)); + break; + + case WLC_E_SCAN_COMPLETE: + case WLC_E_PMKID_CACHE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_COMPLETE: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; + + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + + case WLC_E_TRACE: + { + static uint32 seqnum_prev = 0; + msgtrace_hdr_t hdr; + uint32 nblost; + char *s, *p; + + buf = (uchar *) event_data; + memcpy(&hdr, buf, MSGTRACE_HDRLEN); + + if (hdr.version != MSGTRACE_VERSION) { + printf("\nMACEVENT: %s [unsupported version --> " + "dhd version:%d dongle version:%d]\n", + event_name, MSGTRACE_VERSION, hdr.version); + /* Reset datalen to avoid display below */ + datalen = 0; + break; + } + + /* There are 2 bytes available at the end of data */ + buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; + + if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { + printf("\nWLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf)); + } + + nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; + if (nblost > 0) { + printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n", + ntoh32(hdr.seqnum), nblost); + } + seqnum_prev = ntoh32(hdr.seqnum); + + /* Display the trace buffer. 
Advance from \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + p = (char *)&buf[MSGTRACE_HDRLEN]; + while ((s = strstr(p, "\n")) != NULL) { + *s = '\0'; + printf("%s\n", p); + p = s + 1; + } + printf("%s\n", p); + + /* Reset datalen to avoid display below */ + datalen = 0; + } + break; + + + case WLC_E_RSSI: + DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data)))); + break; + + default: + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data */ + if (datalen) { + buf = (uchar *) event_data; + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) + DHD_EVENT((" 0x%02x ", *buf++)); + DHD_EVENT(("\n")); + } +} +#endif /* SHOW_EVENTS */ + +int +wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event, void **data_ptr) +{ + /* check whether packet is a BRCM event pkt */ + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + char *event_data; + uint32 type, status; + uint16 flags; + int evlen; + + if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); + return (BCME_ERROR); + } + + /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */ + if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { + DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); + return (BCME_ERROR); + } + + *data_ptr = &pvt_data[1]; + event_data = *data_ptr; + + /* memcpy since BRCM event pkt may be unaligned. */ + memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); + + type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + evlen = ntoh32_ua((void *)&event->datalen) + sizeof(bcm_event_t); + + switch (type) { + case WLC_E_IF: + { + dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data; + DHD_TRACE(("%s: if event\n", __FUNCTION__)); + + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) + { + if (ifevent->action == WLC_E_IF_ADD) + dhd_add_if(dhd, ifevent->ifidx, + NULL, event->ifname, + pvt_data->eth.ether_dhost, + ifevent->flags, ifevent->bssidx); + else + dhd_del_if(dhd, ifevent->ifidx); + } else { + DHD_ERROR(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); + } + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd, (char *)pvt_data, evlen, *ifidx); + break; + + +#ifdef P2P + case WLC_E_NDIS_LINK: + break; +#endif + /* fall through */ + /* These are what external supplicant/authenticator wants */ + case WLC_E_LINK: + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + case WLC_E_DISASSOC_IND: + case WLC_E_MIC_ERROR: + default: + /* Fall through: this should get _everything_ */ + + *ifidx = dhd_ifname2idx(dhd, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + + /* put it back to WLC_E_NDIS_LINK */ + if (type == WLC_E_NDIS_LINK) { + uint32 temp; + + temp = ntoh32_ua((void *)&event->event_type); + DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp)); + + temp = ntoh32(WLC_E_NDIS_LINK); + memcpy((void *)(&pvt_data->event.event_type), &temp, + sizeof(pvt_data->event.event_type)); + } + break; + } + +#ifdef 
SHOW_EVENTS + wl_show_host_event(event, event_data); +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} + + +void +wl_event_to_host_order(wl_event_msg_t *evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. + */ + evt->event_type = ntoh32(evt->event_type); + evt->flags = ntoh16(evt->flags); + evt->status = ntoh32(evt->status); + evt->reason = ntoh32(evt->reason); + evt->auth_type = ntoh32(evt->auth_type); + evt->datalen = ntoh32(evt->datalen); + evt->version = ntoh16(evt->version); +} + +void print_buf(void *pbuf, int len, int bytes_per_line) +{ + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +} + +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) + +#ifdef PKT_FILTER_SUPPORT +/* Convert user's input in hex pattern to byte-size mask */ +static int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + strncpy(num, src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[128]; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (NULL == argv[i]) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[str_len] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. */ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len); + rc = rc >= 0 ? 0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + + /* Contorl the master mode */ + bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf)); + rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf)); + rc = rc >= 0 ? 
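/*
 * Layout of the iovar buffer assembled just above, shown here because the
 * pointer arithmetic is easy to misread: the NUL-terminated iovar name is
 * followed immediately by the binary payload.
 *
 *     offset 0                    "pkt_filter_enable\0"
 *     offset strlen(name) + 1     wl_pkt_filter_enable_t { id, enable }
 *
 * buf_len therefore ends up as strlen("pkt_filter_enable") + 1 +
 * sizeof(wl_pkt_filter_enable_t), which is what is handed to
 * dhdcdc_set_ioctl(..., WLC_SET_VAR, ...).
 */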
0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc; + uint32 mask_size; + uint32 pattern_size; + char *argv[8], * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; +#define BUF_SIZE 2048 + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf))); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (NULL == argv[i]) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[ str_len ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (NULL == argv[++i]) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (NULL == argv[++i]) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if (NULL == argv[++i]) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (NULL == argv[++i]) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + mask_size = + htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern)); + + if (NULL == argv[++i]) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + pattern_size = + htod32(wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); + + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + ** then memcpy'ed into buffer (keep_alive_pktp) since there is no + ** guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + + rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len); + rc = rc >= 0 ? 
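/*
 * The string parsed above is a space-separated filter spec:
 *
 *     "<id> <negate_match> <type> <offset> <0xmask> <0xpattern>"
 *
 * where mask and pattern are hex strings of equal, even length as enforced
 * by wl_pattern_atoh().  A purely illustrative example (the id and values
 * are made up, not defaults of this driver) that matches the IPv4 multicast
 * MAC prefix 01:00:5e at offset 0 of the frame:
 *
 *     dhd_pktfilter_offload_set(dhd, "102 0 0 0 0xFFFFFF 0x01005E");
 *     dhd_pktfilter_offload_enable(dhd, "102 0 0 0 0xFFFFFF 0x01005E",
 *                                  1, dhd_master_mode);
 *
 * dhd_pktfilter_offload_enable() only parses the leading id token, so
 * passing the full spec string to it is harmless.
 */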
0 : rc; + + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, BUF_SIZE); +} +#endif + +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + char iovbuf[32]; + int retcode; + + bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + else + DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + char iovbuf[32]; + int retcode; + + bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf)); + retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); +} +#endif + +int +dhd_preinit_ioctls(dhd_pub_t *dhd) +{ + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + uint up = 0; + char buf[128], *ptr; + uint power_mode = PM_FAST; + uint32 dongle_align = DHD_SDALIGN; + uint32 glom = 0; + uint bcn_timeout = 3; + int scan_assoc_time = 40; + int scan_unassoc_time = 40; +#ifdef GET_CUSTOM_MAC_ENABLE + int ret; + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ + + dhd_os_proto_block(dhd); + +#ifdef GET_CUSTOM_MAC_ENABLE + /* + ** Read MAC address from external customer place + ** NOTE that default mac address has to be present in otp or nvram file + ** to bring up firmware but unique per board mac address maybe provided + ** by customer code + */ + ret = dhd_custom_get_mac_address(ea_addr.octet); + if (!ret) { + bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf)); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + memcpy(dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN); + } +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#ifdef SET_RANDOM_MAC_SOFTAP + if (strstr(fw_path, "apsta") != NULL) { + uint rand_mac; + int ret; + + srandom32((uint)jiffies); + rand_mac = random32(); + iovbuf[0] = 0x02; /* locally administered bit */ + iovbuf[1] = 0x1A; + iovbuf[2] = 0x11; + iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; + iovbuf[4] = (unsigned char)(rand_mac >> 8); + iovbuf[5] = (unsigned char)(rand_mac >> 16); + + printk("Broadcom Dongle Host Driver mac=%02x:%02x:%02x:%02x:%02x:%02x\n", + iovbuf[0], iovbuf[1], iovbuf[2], iovbuf[3], iovbuf[4], iovbuf[5]); + + bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf)); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); + } +#endif /* SET_RANDOM_MAC_SOFTAP */ + + /* Set Country code */ + if (dhd->country_code[0] != 0) { + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_COUNTRY, + dhd->country_code, 
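/*
 * Most of the dongle configuration in dhd_preinit_ioctls() follows the same
 * pattern: bcm_mkiovar() packs "name\0" plus the raw value into a scratch
 * buffer, and the result is pushed with WLC_SET_VAR.  A minimal sketch for a
 * single 32-bit iovar, mirroring the calls in this function:
 *
 *     char iovbuf[32];
 *     uint32 val = 3;
 *
 *     bcm_mkiovar("bcn_timeout", (char *)&val, 4, iovbuf, sizeof(iovbuf));
 *     dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 *
 * Reads go the other way: bcm_mkiovar() with a zero-length value followed by
 * dhdcdc_query_ioctl(..., WLC_GET_VAR, ...), as done for "ver" below.
 */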
sizeof(dhd->country_code)) < 0) { + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + } + + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + bcm_mkiovar("ver", 0, 0, buf, sizeof(buf)); + dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf)); + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + DHD_ERROR(("Firmware version = %s\n", buf)); + + /* Set PowerSave mode */ + dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode)); + + /* Match Host and Dongle rx alignment */ + bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + /* disable glom option per default */ + bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + /* Enable/Disable build-in roaming to allowed ext supplicant to take of romaing */ + bcm_mkiovar("roam_off", (char *)&dhd_roam, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + if (dhd_roam == 0) + { + /* set internal roaming roaming parameters */ + int roam_scan_period = 30; /* in sec */ + int roam_fullscan_period = 120; /* in sec */ + int roam_trigger = -85; + int roam_delta = 15; + int band; + int band_temp_set = WLC_BAND_2G; + + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, \ + (char *)&roam_scan_period, sizeof(roam_scan_period)) < 0) + DHD_ERROR(("%s: roam scan setup failed\n", __FUNCTION__)); + + bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, \ + 4, iovbuf, sizeof(iovbuf)); + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, \ + iovbuf, sizeof(iovbuf)) < 0) + DHD_ERROR(("%s: roam fullscan setup failed\n", __FUNCTION__)); + + if (dhdcdc_query_ioctl(dhd, 0, WLC_GET_BAND, \ + (char *)&band, sizeof(band)) < 0) + DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__)); + else { + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_ALL)) + { + /* temp set band to insert new roams values */ + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \ + (char *)&band_temp_set, sizeof(band_temp_set)) < 0) + DHD_ERROR(("%s: local band seting failed\n", __FUNCTION__)); + } + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_DELTA, \ + (char *)&roam_delta, sizeof(roam_delta)) < 0) + DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__)); + + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_TRIGGER, \ + (char *)&roam_trigger, sizeof(roam_trigger)) < 0) + DHD_ERROR(("%s: roam trigger setting failed\n", __FUNCTION__)); + + /* Restore original band settinngs */ + if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \ + (char *)&band, sizeof(band)) < 0) + DHD_ERROR(("%s: Original band restore failed\n", __FUNCTION__)); + } + } + + /* Force STA UP */ + if (dhd_radio_up) + dhdcdc_set_ioctl(dhd, 0, WLC_UP, (char *)&up, sizeof(up)); + + /* Setup event_msgs */ + bcm_mkiovar("event_msgs", dhd->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, + sizeof(scan_assoc_time)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, + sizeof(scan_unassoc_time)); + +#ifdef ARP_OFFLOAD_SUPPORT + 
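/*
 * dhd_arp_mode is handed straight to the firmware as the "arp_ol" bitmask
 * and dhd_arp_enable toggles "arpoe" (see the helpers earlier in this file).
 * The individual mode bits (ARP agent, snooping, host/peer auto-reply) are
 * firmware ABI defined in wlioctl.h; take them from that header rather than
 * hard-coding numbers.  A sketch of how a platform might tighten the mode at
 * init time, assuming the bit names from wlioctl.h are available:
 *
 *     dhd_arp_offload_set(dhd, ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY);
 *     dhd_arp_offload_enable(dhd, TRUE);
 */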
/* Set and enable ARP offload feature */ + if (dhd_arp_enable) + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, dhd_arp_enable); +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PKT_FILTER_SUPPORT + { + int i; + /* Set up pkt filter */ + if (dhd_pkt_filter_enable) { + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + dhd_pkt_filter_init, dhd_master_mode); + } + } + } +#endif /* PKT_FILTER_SUPPORT */ + + dhd_os_proto_unblock(dhd); + + return 0; +} + +#ifdef SIMPLE_ISCAN + +uint iscan_thread_id; +iscan_buf_t * iscan_chain = 0; + +iscan_buf_t * +dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf) +{ + iscan_buf_t *iscanbuf_alloc = 0; + iscan_buf_t *iscanbuf_head; + + dhd_iscan_lock(); + + iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t)); + if (iscanbuf_alloc == NULL) + goto fail; + + iscanbuf_alloc->next = NULL; + iscanbuf_head = *iscanbuf; + + DHD_ISCAN(("%s: addr of allocated node = 0x%X" + "addr of iscanbuf_head = 0x%X dhd = 0x%X\n", + __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd)); + + if (iscanbuf_head == NULL) { + *iscanbuf = iscanbuf_alloc; + DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__)); + goto fail; + } + + while (iscanbuf_head->next) + iscanbuf_head = iscanbuf_head->next; + + iscanbuf_head->next = iscanbuf_alloc; + +fail: + dhd_iscan_unlock(); + return iscanbuf_alloc; +} + +void +dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete) +{ + iscan_buf_t *iscanbuf_free = 0; + iscan_buf_t *iscanbuf_prv = 0; + iscan_buf_t *iscanbuf_cur = iscan_chain; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + + dhd_iscan_lock(); + /* If iscan_delete is null then delete the entire + * chain or else delete specific one provided + */ + if (!iscan_delete) { + while (iscanbuf_cur) { + iscanbuf_free = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + iscanbuf_free->next = 0; + MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t)); + } + iscan_chain = 0; + } else { + while (iscanbuf_cur) { + if (iscanbuf_cur == iscan_delete) + break; + iscanbuf_prv = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + } + if (iscanbuf_prv) + iscanbuf_prv->next = iscan_delete->next; + + iscan_delete->next = 0; + MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t)); + + if (!iscanbuf_prv) + iscan_chain = 0; + } + dhd_iscan_unlock(); +} + +iscan_buf_t * +dhd_iscan_result_buf(void) +{ + return iscan_chain; +} + + + +/* +* print scan cache +* print partial iscan_skip list differently +*/ +int +dhd_iscan_print_cache(iscan_buf_t *iscan_skip) +{ + int i = 0, l = 0; + iscan_buf_t *iscan_cur; + wl_iscan_results_t *list; + wl_scan_results_t *results; + wl_bss_info_t UNALIGNED *bi; + + dhd_iscan_lock(); + + iscan_cur = dhd_iscan_result_buf(); + + while (iscan_cur) { + list = (wl_iscan_results_t *)iscan_cur->iscan_buf; + if (!list) + break; + + results = (wl_scan_results_t *)&list->results; + if (!results) + break; + + if (results->version != WL_BSS_INFO_VERSION) { + DHD_ISCAN(("%s: results->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, results->version)); + goto done; + } + + bi = results->bss_info; + for (i = 0; i < results->count; i++) { + if (!bi) + break; + + DHD_ISCAN(("%s[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n", + iscan_cur != iscan_skip?"BSS":"bss", l, i, + bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2], + bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5])); + + bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)); + } + iscan_cur = 
iscan_cur->next; + l++; + } + +done: + dhd_iscan_unlock(); + return 0; +} + +/* +* delete disappeared AP from specific scan cache but skip partial list in iscan_skip +*/ +int +dhd_iscan_delete_bss(void *dhdp, void *addr, iscan_buf_t *iscan_skip) +{ + int i = 0, j = 0, l = 0; + iscan_buf_t *iscan_cur; + wl_iscan_results_t *list; + wl_scan_results_t *results; + wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next; + + uchar *s_addr = addr; + + dhd_iscan_lock(); + DHD_ISCAN(("%s: BSS to remove %X:%X:%X:%X:%X:%X\n", + __FUNCTION__, s_addr[0], s_addr[1], s_addr[2], + s_addr[3], s_addr[4], s_addr[5])); + + iscan_cur = dhd_iscan_result_buf(); + + while (iscan_cur) { + if (iscan_cur != iscan_skip) { + list = (wl_iscan_results_t *)iscan_cur->iscan_buf; + if (!list) + break; + + results = (wl_scan_results_t *)&list->results; + if (!results) + break; + + if (results->version != WL_BSS_INFO_VERSION) { + DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, results->version)); + goto done; + } + + bi = results->bss_info; + for (i = 0; i < results->count; i++) { + if (!bi) + break; + + if (!memcmp(bi->BSSID.octet, addr, ETHER_ADDR_LEN)) { + DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n", + __FUNCTION__, l, i, bi->BSSID.octet[0], + bi->BSSID.octet[1], bi->BSSID.octet[2], + bi->BSSID.octet[3], bi->BSSID.octet[4], + bi->BSSID.octet[5])); + + bi_new = bi; + bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)); +/* + if(bi && bi_new) { + bcopy(bi, bi_new, results->buflen - + dtoh32(bi_new->length)); + results->buflen -= dtoh32(bi_new->length); + } +*/ + results->buflen -= dtoh32(bi_new->length); + results->count--; + + for (j = i; j < results->count; j++) { + if (bi && bi_new) { + DHD_ISCAN(("%s: Moved up BSS[%2.2d:%2.2d]" + "%X:%X:%X:%X:%X:%X\n", + __FUNCTION__, l, j, bi->BSSID.octet[0], + bi->BSSID.octet[1], bi->BSSID.octet[2], + bi->BSSID.octet[3], bi->BSSID.octet[4], + bi->BSSID.octet[5])); + + bi_next = (wl_bss_info_t *)((uintptr)bi + + dtoh32(bi->length)); + bcopy(bi, bi_new, dtoh32(bi->length)); + bi_new = (wl_bss_info_t *)((uintptr)bi_new + + dtoh32(bi_new->length)); + bi = bi_next; + } + } + + if (results->count == 0) { + /* Prune now empty partial scan list */ + dhd_iscan_free_buf(dhdp, iscan_cur); + goto done; + } + break; + } + bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)); + } + } + iscan_cur = iscan_cur->next; + l++; + } + +done: + dhd_iscan_unlock(); + return 0; +} + +int +dhd_iscan_remove_duplicates(void * dhdp, iscan_buf_t *iscan_cur) +{ + int i = 0; + wl_iscan_results_t *list; + wl_scan_results_t *results; + wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next; + + dhd_iscan_lock(); + + DHD_ISCAN(("%s: Scan cache before delete\n", + __FUNCTION__)); + dhd_iscan_print_cache(iscan_cur); + + if (!iscan_cur) + goto done; + + list = (wl_iscan_results_t *)iscan_cur->iscan_buf; + if (!list) + goto done; + + results = (wl_scan_results_t *)&list->results; + if (!results) + goto done; + + if (results->version != WL_BSS_INFO_VERSION) { + DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, results->version)); + goto done; + } + + bi = results->bss_info; + for (i = 0; i < results->count; i++) { + if (!bi) + break; + + DHD_ISCAN(("%s: Find dups for BSS[%2.2d] %X:%X:%X:%X:%X:%X\n", + __FUNCTION__, i, bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2], + bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5])); + + dhd_iscan_delete_bss(dhdp, bi->BSSID.octet, iscan_cur); + + bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)); 
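/*
 * Deduplication strategy used by the loop above: for every BSS in the newly
 * received partial result (iscan_cur), dhd_iscan_delete_bss() walks all of
 * the other buffers in the chain and removes any entry with the same BSSID,
 * so the freshest copy of each AP is the one that survives.  Walking the
 * cache always follows the same pattern as dhd_iscan_print_cache():
 *
 *     iscan_buf_t *node;
 *     for (node = dhd_iscan_result_buf(); node != NULL; node = node->next) {
 *             wl_iscan_results_t *list = (wl_iscan_results_t *)node->iscan_buf;
 *             process &list->results here
 *     }
 *
 * The list head (iscan_chain) is only ever touched under dhd_iscan_lock().
 */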
+ } + +done: + DHD_ISCAN(("%s: Scan cache after delete\n", __FUNCTION__)); + dhd_iscan_print_cache(iscan_cur); + dhd_iscan_unlock(); + return 0; +} + +void +dhd_iscan_ind_scan_confirm(void *dhdp, bool status) +{ + + dhd_ind_scan_confirm(dhdp, status); +} + +int +dhd_iscan_request(void * dhdp, uint16 action) +{ + int rc; + wl_iscan_params_t params; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + char buf[WLC_IOCTL_SMLEN]; + + + memset(¶ms, 0, sizeof(wl_iscan_params_t)); + memcpy(¶ms.params.bssid, ðer_bcast, ETHER_ADDR_LEN); + + params.params.bss_type = DOT11_BSSTYPE_ANY; + params.params.scan_type = DOT11_SCANTYPE_ACTIVE; + + params.params.nprobes = htod32(-1); + params.params.active_time = htod32(-1); + params.params.passive_time = htod32(-1); + params.params.home_time = htod32(-1); + params.params.channel_num = htod32(0); + + params.version = htod32(ISCAN_REQ_VERSION); + params.action = htod16(action); + params.scan_duration = htod16(0); + + bcm_mkiovar("iscan", (char *)¶ms, sizeof(wl_iscan_params_t), buf, WLC_IOCTL_SMLEN); + rc = dhd_wl_ioctl(dhdp, WLC_SET_VAR, buf, WLC_IOCTL_SMLEN); + + return rc; +} + +static int +dhd_iscan_get_partial_result(void *dhdp, uint *scan_count) +{ + wl_iscan_results_t *list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + iscan_buf_t *iscan_cur; + int status = -1; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + int rc; + + + iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain); + if (!iscan_cur) { + DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__)); + dhd_iscan_free_buf(dhdp, 0); + dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT); + goto fail; + } + + dhd_iscan_lock(); + + memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE, + iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + rc = dhd_wl_ioctl(dhdp, WLC_GET_VAR, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + *scan_count = results->count = dtoh32(results->count); + status = dtoh32(list_buf->status); + + dhd_iscan_unlock(); + + if (!(*scan_count)) + dhd_iscan_free_buf(dhdp, iscan_cur); + else + dhd_iscan_remove_duplicates(dhdp, iscan_cur); + + +fail: + return status; +} + +#endif + +#ifdef PNO_SUPPORT +int dhd_pno_clean(dhd_pub_t *dhd) +{ + char iovbuf[128]; + int pfn_enabled = 0; + int iov_len = 0; + int ret; + + /* Disable pfn */ + iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) >= 0) { + /* clear pfn */ + iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf)); + if (iov_len) { + if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + } + } + else { + ret = -1; + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len)); + } + } + else + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + + return ret; +} + +int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled) +{ + char iovbuf[128]; + int ret = -1; + + if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + return ret; + } + + /* Enable/disable PNO */ + if ((ret = 
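/*
 * Typical usage of the PNO (preferred network offload) helpers in this
 * file: dhd_pno_set() first flushes any previous state via dhd_pno_clean(),
 * then pushes the scan period ("pfn_set") and one "pfn_add" per SSID;
 * scanning is started afterwards with dhd_pno_enable(dhd, 1).  A minimal
 * sketch with an illustrative two-entry SSID list:
 *
 *     wlc_ssid_t ssids[2];
 *
 *     memset(ssids, 0, sizeof(ssids));
 *     memcpy(ssids[0].SSID, "home-ap", 7);   ssids[0].SSID_len = 7;
 *     memcpy(ssids[1].SSID, "office", 6);    ssids[1].SSID_len = 6;
 *
 *     dhd_pno_set(dhd, ssids, 2, 60);        60 passed through as pfn scan_freq
 *     dhd_pno_enable(dhd, 1);
 *
 * dhd_pno_set() rejects zero-length (broadcast) SSIDs, so every entry must
 * name a concrete network.
 */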
bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) { + if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) { + DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret)); + return ret; + } + else { + dhd->pno_enable = pfn_enabled; + DHD_TRACE(("%s set pno as %d\n", __FUNCTION__, dhd->pno_enable)); + } + } + else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret)); + + return ret; +} + +/* Function to execute combined scan */ +int +dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) +{ + int err = -1; + char iovbuf[128]; + int k, i; + wl_pfn_param_t pfn_param; + wl_pfn_t pfn_element; + + DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr)); + + if ((!dhd) && (!ssids_local)) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + err = -1; + } + + /* Check for broadcast ssid */ + for (k = 0; k < nssid; k++) { + if (!ssids_local[k].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k)); + return err; + } + } +/* #define PNO_DUMP 1 */ +#ifdef PNO_DUMP + { + int j; + for (j = 0; j < nssid; j++) { + DHD_ERROR(("%d: scan for %s size =%d\n", j, + ssids_local[j].SSID, ssids_local[j].SSID_len)); + } + } +#endif /* PNO_DUMP */ + + /* clean up everything */ + if ((err = dhd_pno_clean(dhd)) < 0) { + DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err)); + return err; + } + memset(&pfn_param, 0, sizeof(pfn_param)); + memset(&pfn_element, 0, sizeof(pfn_element)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT)); + + /* set up pno scan fr */ + if (scan_fr != 0) + pfn_param.scan_freq = htod32(scan_fr); + + bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); + + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + + pfn_element.bss_type = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); + pfn_element.auth = (DOT11_OPEN_SYSTEM); + pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); + pfn_element.wsec = htod32(0); + pfn_element.infra = htod32(1); + + memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len); + pfn_element.ssid.SSID_len = ssids_local[i].SSID_len; + + if ((err = + bcm_mkiovar("pfn_add", (char *)&pfn_element, + sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) { + if ((err = + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) { + DHD_ERROR(("%s failed for i=%d error=%d\n", + __FUNCTION__, i, err)); + return err; + } + } + else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err)); + } + + /* Enable PNO */ + /* dhd_pno_enable(dhd, 1); */ + return err; +} + +int dhd_pno_get_status(dhd_pub_t *dhd) +{ + int ret = -1; + + if (!dhd) + return ret; + else + return (dhd->pno_enable); +} + +#endif /* PNO_SUPPORT */ + +#if defined(CSCAN) + +/* Androd ComboSCAN support */ +/* + * data parsing from ComboScan tlv list +*/ +int +wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token, + int input_size, int *bytes_left) +{ + char* str = *list_str; + uint16 short_temp; + uint32 int_temp; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + + /* Clean all dest bytes */ + memset(dst, 0, dst_size); + while (*bytes_left > 0) { + + if (str[0] != token) { + DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n", + __FUNCTION__, token, str[0], 
*bytes_left)); + return -1; + } + + *bytes_left -= 1; + str += 1; + + if (input_size == 1) { + memcpy(dst, str, input_size); + } + else if (input_size == 2) { + memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)), + input_size); + } + else if (input_size == 4) { + memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)), + input_size); + } + + *bytes_left -= input_size; + str += input_size; + *list_str = str; + return 1; + } + return 1; +} + +/* + * channel list parsing from cscan tlv list +*/ +int +wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, + int channel_num, int *bytes_left) +{ + char* str = *list_str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) { + *list_str = str; + DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* All channels */ + channel_list[idx] = 0x0; + } + else { + channel_list[idx] = (uint16)str[0]; + DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx])); + } + *bytes_left -= 1; + str += 1; + + if (idx++ > 255) { + DHD_ERROR(("%s Too many channels \n", __FUNCTION__)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* + * SSIDs list parsing from cscan tlv list + */ +int +wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left) +{ + char* str = *list_str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_SSID_IE) { + *list_str = str; + DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + + /* Get proper CSCAN_TLV_TYPE_SSID_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* Broadcast SSID */ + ssid[idx].SSID_len = 0; + memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN); + *bytes_left -= 1; + str += 1; + + DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left)); + } + else if (str[0] <= DOT11_MAX_SSID_LEN) { + /* Get proper SSID size */ + ssid[idx].SSID_len = str[0]; + *bytes_left -= 1; + str += 1; + + /* Get SSID */ + if (ssid[idx].SSID_len > *bytes_left) { + DHD_ERROR(("%s out of memory range len=%d but left=%d\n", + __FUNCTION__, ssid[idx].SSID_len, *bytes_left)); + return -1; + } + + memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len); + + *bytes_left -= ssid[idx].SSID_len; + str += ssid[idx].SSID_len; + + DHD_TRACE(("%s :size=%d left=%d\n", + (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); + } + else { + DHD_ERROR(("### SSID size more that %d\n", str[0])); + return -1; + } + + if (idx++ > max) { + DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* Parse a comma-separated list from list_str into ssid array, starting + * at index idx. Max specifies size of the ssid array. Parses ssids + * and returns updated idx; if idx >= max not all fit, the excess have + * not been copied. Returns -1 on empty string, or on ssid too long. 
+ */ +int +wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max) +{ + char* str, *ptr; + + if ((list_str == NULL) || (*list_str == NULL)) + return -1; + + for (str = *list_str; str != NULL; str = ptr) { + + /* check for next TAG */ + if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) { + *list_str = str + strlen(GET_CHANNEL); + return idx; + } + + if ((ptr = strchr(str, ',')) != NULL) { + *ptr++ = '\0'; + } + + if (strlen(str) > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN)); + return -1; + } + + if (strlen(str) == 0) + ssid[idx].SSID_len = 0; + + if (idx < max) { + strcpy((char*)ssid[idx].SSID, str); + ssid[idx].SSID_len = strlen(str); + } + idx++; + } + return idx; +} + +/* + * Parse channel list from iwpriv CSCAN + */ +int +wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) +{ + int num; + int val; + char* str; + char* endptr = NULL; + + if ((list_str == NULL)||(*list_str == NULL)) + return -1; + + str = *list_str; + num = 0; + while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) { + val = (int)strtoul(str, &endptr, 0); + if (endptr == str) { + printf("could not parse channel number starting at" + " substring \"%s\" in list:\n%s\n", + str, *list_str); + return -1; + } + str = endptr + strspn(endptr, " ,"); + + if (num == channel_num) { + DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n", + channel_num, *list_str)); + return -1; + } + + channel_list[num++] = (uint16)val; + } + *list_str = str; + return num; +} + +#endif diff --git a/drivers/net/wireless/bcm4329/dhd_custom_gpio.c b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c new file mode 100644 index 000000000000..8c6ec470b8bd --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c @@ -0,0 +1,179 @@ +/* +* Customer code to add GPIO control during WLAN start/stop +* Copyright (C) 1999-2010, Broadcom Corporation +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed to you +* under the terms of the GNU General Public License version 2 (the "GPL"), +* available at http://www.broadcom.com/licenses/GPLv2.php, with the +* following added to such license: +* +* As a special exception, the copyright holders of this software give you +* permission to link this software with independent modules, and to copy and +* distribute the resulting executable under terms of your choice, provided that +* you also meet, for each linked independent module, the terms and conditions of +* the license of that module. An independent module is a module which is not +* derived from this software. The special exception does not apply to any +* modifications of the software. +* +* Notwithstanding the above, under no circumstances may you combine this +* software in any way with any other Broadcom software provided under a license +* other than the GPL, without Broadcom's express prior written consent. 
+* +* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.1 2010/09/02 23:13:16 Exp $ +*/ + + +#include <typedefs.h> +#include <linuxver.h> +#include <osl.h> +#include <bcmutils.h> + +#include <dngl_stats.h> +#include <dhd.h> + +#include <wlioctl.h> +#include <wl_iw.h> + +#define WL_ERROR(x) printf x +#define WL_TRACE(x) + +#ifdef CUSTOMER_HW +extern void bcm_wlan_power_off(int); +extern void bcm_wlan_power_on(int); +#endif /* CUSTOMER_HW */ +#ifdef CUSTOMER_HW2 +int wifi_set_carddetect(int on); +int wifi_set_power(int on, unsigned long msec); +int wifi_get_irq_number(unsigned long *irq_flags_ptr); +int wifi_get_mac_addr(unsigned char *buf); +#endif + +#if defined(OOB_INTR_ONLY) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +#ifdef CUSTOMER_HW3 +#include <mach/gpio.h> +#endif + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; /* GG 19 */ + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + +#ifdef CUSTOMER_HW2 + host_oob_irq = wifi_get_irq_number(irq_flags_ptr); + +#else /* for NOT CUSTOMER_HW2 */ +#if defined(CUSTOM_OOB_GPIO_NUM) + if (dhd_oob_gpio_num < 0) { + dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; + } +#endif + + if (dhd_oob_gpio_num < 0) { + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", + __FUNCTION__)); + return (dhd_oob_gpio_num); + } + + WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", + __FUNCTION__, dhd_oob_gpio_num)); + +#if defined CUSTOMER_HW + host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num); +#elif defined CUSTOMER_HW3 + gpio_request(dhd_oob_gpio_num, "oob irq"); + host_oob_irq = gpio_to_irq(dhd_oob_gpio_num); + gpio_direction_input(dhd_oob_gpio_num); +#endif /* CUSTOMER_HW */ +#endif /* CUSTOMER_HW2 */ + + return (host_oob_irq); +} +#endif /* defined(OOB_INTR_ONLY) */ + +/* Customer function to control hw specific wlan gpios */ +void +dhd_customer_gpio_wlan_ctrl(int onoff) +{ + switch (onoff) { + case WLAN_RESET_OFF: + WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_off(2); +#endif /* CUSTOMER_HW */ +#ifdef CUSTOMER_HW2 + wifi_set_power(0, 0); +#endif + WL_ERROR(("=========== WLAN placed in RESET ========\n")); + break; + + case WLAN_RESET_ON: + WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_on(2); +#endif /* CUSTOMER_HW */ +#ifdef CUSTOMER_HW2 + wifi_set_power(1, 0); +#endif + WL_ERROR(("=========== WLAN going back to live ========\n")); + break; + + case WLAN_POWER_OFF: + WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_off(1); +#endif /* CUSTOMER_HW */ + break; + + case WLAN_POWER_ON: + WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_on(1); + /* Lets customer power to get stable */ + OSL_DELAY(50); +#endif /* CUSTOMER_HW */ + break; + } +} + +#ifdef GET_CUSTOM_MAC_ENABLE +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#ifdef CUSTOMER_HW2 + ret = wifi_get_mac_addr(buf); +#endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + 
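/*
 * Contract of this hook, as used by dhd_preinit_ioctls(): fill the 6-byte
 * buffer with the board's permanent MAC and return 0, or return non-zero to
 * fall back to the address burnt into OTP/NVRAM.  Under CUSTOMER_HW2 the
 * work is delegated to wifi_get_mac_addr(), which the platform supplies via
 * wifi_platform_data.  A board-side implementation might look like the
 * hypothetical sketch below (the helper name and the hard-coded address are
 * illustrative only):
 *
 *     static int board_wifi_get_mac_addr(unsigned char *buf)
 *     {
 *             const unsigned char mac[ETHER_ADDR_LEN] =
 *                     { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
 *             memcpy(buf, mac, ETHER_ADDR_LEN);
 *             return 0;
 *     }
 *
 * The compiled-in EXAMPLE_GET_MAC block below serves the same purpose for
 * bring-up testing.
 */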
struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ diff --git a/drivers/net/wireless/bcm4329/dhd_dbg.h b/drivers/net/wireless/bcm4329/dhd_dbg.h new file mode 100644 index 000000000000..b48c1d70f144 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_dbg.h @@ -0,0 +1,100 @@ +/* + * Debug/trace/assert driver definitions for Dongle Host Driver. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_dbg.h,v 1.5.6.2.4.2.14.10 2010/05/21 21:49:38 Exp $ + */ + +#ifndef _dhd_dbg_ +#define _dhd_dbg_ + +#ifdef DHD_DEBUG + +#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \ + printf args;} while (0) +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) 
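/*
 * All of the DHD_xxx() trace macros above are gated by bits in the global
 * dhd_msg_level (bit values come from dhdioctl.h, included at the end of
 * this header).  The level can be raised at runtime through the "msglevel"
 * iovar that dhd_common.c registers when DHD_DEBUG is set, or simply by
 * assigning the variable early in bring-up code, e.g.:
 *
 *     dhd_msg_level = DHD_ERROR_VAL | DHD_EVENT_VAL | DHD_ISCAN_VAL;
 *
 * Without DHD_DEBUG only DHD_ERROR() survives, rate-limited by
 * net_ratelimit().
 */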
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) + +#else /* DHD_DEBUG */ + +#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define DHD_INTR(args) +#define DHD_GLOM(args) +#define DHD_EVENT(args) +#define DHD_BTA(args) +#define DHD_ISCAN(args) + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#endif /* DHD_DEBUG */ + +#define DHD_LOG(args) + +#define DHD_NONE(args) +extern int dhd_msg_level; + +/* Defines msg bits */ +#include <dhdioctl.h> + +#endif /* _dhd_dbg_ */ diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c new file mode 100644 index 000000000000..f687b31e3338 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_linux.c @@ -0,0 +1,3218 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104 2010/08/20 19:15:40 Exp $ + */ + +#ifdef CONFIG_WIFI_CONTROL_FUNC +#include <linux/platform_device.h> +#endif +#include <typedefs.h> +#include <linuxver.h> +#include <osl.h> + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/random.h> +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <linux/fcntl.h> +#include <linux/fs.h> + +#include <asm/uaccess.h> +#include <asm/unaligned.h> + +#include <epivers.h> +#include <bcmutils.h> +#include <bcmendian.h> + +#include <proto/ethernet.h> +#include <dngl_stats.h> +#include <dhd.h> +#include <dhd_bus.h> +#include <dhd_proto.h> +#include <dhd_dbg.h> +#include <wl_iw.h> +#ifdef CONFIG_HAS_WAKELOCK +#include <linux/wakelock.h> +#endif +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) +#include <linux/wlan_plat.h> + +struct semaphore wifi_control_sem; + +struct dhd_bus *g_bus; + +static struct wifi_platform_data *wifi_control_data = NULL; +static struct resource *wifi_irqres = NULL; + +int wifi_get_irq_number(unsigned long *irq_flags_ptr) +{ + if (wifi_irqres) { + *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK; + return (int)wifi_irqres->start; + } +#ifdef CUSTOM_OOB_GPIO_NUM + return CUSTOM_OOB_GPIO_NUM; +#else + return -1; +#endif +} + +int wifi_set_carddetect(int on) +{ + printk("%s = %d\n", __FUNCTION__, on); + if (wifi_control_data && wifi_control_data->set_carddetect) { + wifi_control_data->set_carddetect(on); + } + return 0; +} + +int wifi_set_power(int on, unsigned long msec) +{ + printk("%s = %d\n", __FUNCTION__, on); + if (wifi_control_data && wifi_control_data->set_power) { + wifi_control_data->set_power(on); + } + if (msec) + mdelay(msec); + return 0; +} + +int wifi_set_reset(int on, unsigned long msec) +{ + printk("%s = %d\n", __FUNCTION__, on); + if (wifi_control_data && wifi_control_data->set_reset) { + wifi_control_data->set_reset(on); + } + if (msec) + mdelay(msec); + return 0; +} + +int wifi_get_mac_addr(unsigned char *buf) +{ + printk("%s\n", __FUNCTION__); + if (!buf) + return -EINVAL; + if (wifi_control_data && wifi_control_data->get_mac_addr) { + return wifi_control_data->get_mac_addr(buf); + } + return -EOPNOTSUPP; +} + +static int wifi_probe(struct platform_device *pdev) +{ + struct wifi_platform_data *wifi_ctrl = + (struct wifi_platform_data *)(pdev->dev.platform_data); + + DHD_TRACE(("## %s\n", __FUNCTION__)); + wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq"); + wifi_control_data = wifi_ctrl; + + wifi_set_power(1, 0); /* Power On */ + wifi_set_carddetect(1); /* CardDetect (0->1) */ + + up(&wifi_control_sem); + return 0; +} + +static int wifi_remove(struct platform_device *pdev) +{ + struct wifi_platform_data *wifi_ctrl = + (struct wifi_platform_data *)(pdev->dev.platform_data); + + DHD_TRACE(("## %s\n", __FUNCTION__)); + wifi_control_data = wifi_ctrl; + + wifi_set_power(0, 0); /* Power Off */ + wifi_set_carddetect(0); /* CardDetect (1->0) */ + + up(&wifi_control_sem); + return 0; +} +static int wifi_suspend(struct platform_device *pdev, pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); + return 0; +} +static int wifi_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); + return 0; +} + +static struct platform_driver wifi_device = { + .probe = wifi_probe, + .remove = wifi_remove, + .suspend = wifi_suspend, + .resume = wifi_resume, + 
.driver = { + .name = "bcm4329_wlan", + } +}; + +int wifi_add_dev(void) +{ + DHD_TRACE(("## Calling platform_driver_register\n")); + return platform_driver_register(&wifi_device); +} + +void wifi_del_dev(void) +{ + DHD_TRACE(("## Unregister platform_driver_register\n")); + platform_driver_unregister(&wifi_device); +} +#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include <linux/suspend.h> +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif /* defined(OOB_INTR_ONLY) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +MODULE_LICENSE("GPL v2"); +#endif /* LinuxVer */ + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) +const char * +print_tainted() +{ + return ""; +} +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ + +/* Linux wireless extension support */ +#if defined(CONFIG_WIRELESS_EXT) +#include <wl_iw.h> +#endif /* defined(CONFIG_WIRELESS_EXT) */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) +#include <linux/earlysuspend.h> +extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len); +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + struct net_device_stats stats; + int idx; /* iface idx in dongle */ + int state; /* interface state */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ +} dhd_if_t; + +/* Local private structure (extension of pub) */ +typedef struct dhd_info { +#if defined(CONFIG_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(CONFIG_WIRELESS_EXT) */ + + dhd_pub_t pub; + + /* OS/stack specifics */ + dhd_if_t *iflist[DHD_MAX_IFS]; + + struct semaphore proto_sem; + wait_queue_head_t ioctl_resp_wait; + struct timer_list timer; + bool wd_timer_valid; + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + /* Thread based operation */ + bool threads_only; + struct semaphore sdsem; + long watchdog_pid; + struct semaphore watchdog_sem; + struct completion watchdog_exited; + long dpc_pid; + struct semaphore dpc_sem; + struct completion dpc_exited; + + /* Wakelocks */ +#ifdef CONFIG_HAS_WAKELOCK + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ +#endif + spinlock_t wl_lock; + int wl_count; + int wl_packet; + + int hang_was_sent; + + /* Thread to issue ioctl for multicast */ + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; + bool set_multicast; + bool set_macaddress; + struct ether_addr macvalue; + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND */ +} dhd_info_t; + +/* 
Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +struct semaphore dhd_registration_sem; +#define DHD_REGISTRATION_TIMEOUT 8000 /* msec : allowed time to finish dhd registration */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0); + +/* Error bits */ +module_param(dhd_msg_level, int, 0); + +/* Spawn a thread for system ioctls (set mac, set mcast) */ +uint dhd_sysioc = TRUE; +module_param(dhd_sysioc, uint, 0); + +/* Watchdog interval */ +uint dhd_watchdog_ms = 10; +module_param(dhd_watchdog_ms, uint, 0); + +#ifdef DHD_DEBUG +/* Console poll interval */ +uint dhd_console_ms = 0; +module_param(dhd_console_ms, uint, 0); +#endif /* DHD_DEBUG */ + +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ +uint dhd_arp_mode = 0xb; +module_param(dhd_arp_mode, uint, 0); + +/* ARP offload enable */ +uint dhd_arp_enable = TRUE; +module_param(dhd_arp_enable, uint, 0); + +/* Global Pkt filter enable control */ +uint dhd_pkt_filter_enable = TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +uint dhd_master_mode = TRUE; +module_param(dhd_master_mode, uint, 1); + +/* Watchdog thread priority, -1 to use kernel timer */ +int dhd_watchdog_prio = 97; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority, -1 to use tasklet */ +int dhd_dpc_prio = 98; +module_param(dhd_dpc_prio, int, 0); + +/* Dongle memory size override */ +extern int dhd_dongle_memsize; +module_param(dhd_dongle_memsize, int, 0); + +/* Control fw roaming */ +#ifdef CUSTOMER_HW2 +uint dhd_roam = 0; +#else +uint dhd_roam = 1; +#endif + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network interface name */ +char iface_name[IFNAMSIZ]; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + + + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif + +/* Version string to report */ +#ifdef DHD_DEBUG +#ifndef SRCBASE +#define SRCBASE "drivers/net/wireless/bcm4329" +#endif +#define DHD_COMPILED "\nCompiled in " SRCBASE +#else +#define DHD_COMPILED +#endif + +static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR +#ifdef DHD_DEBUG +"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__ +#endif +; + + +#if defined(CONFIG_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(CONFIG_WIRELESS_EXT) */ + +static void dhd_dpc(ulong data); +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + dhd_mmc_suspend = TRUE; + ret = NOTIFY_OK; + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + dhd_mmc_suspend = FALSE; + ret = NOTIFY_OK; + break; + } + smp_mb(); + return ret; +} + +static struct notifier_block dhd_sleep_pm_notifier = { + .notifier_call = dhd_sleep_pm_callback, + .priority = 0 +}; +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + DHD_TRACE(("%s: %d\n", __FUNCTION__, value)); + /* 1 - Enable packet filter, only allow unicast packet to send up */ + /* 0 - Disable packet filter */ + if (dhd_pkt_filter_enable) { + int i; + + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +#endif +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ + int power_mode = PM_MAX; + /* wl_pkt_filter_enable_t enable_parm; */ + char iovbuf[32]; + int bcn_li_dtim = 3; +#ifdef CUSTOMER_HW2 + uint roamvar = 1; +#endif /* CUSTOMER_HW2 */ + + DHD_TRACE(("%s: enter, value = %d in_suspend = %d\n", + __FUNCTION__, value, dhd->in_suspend)); + + if (dhd && dhd->up) { + if (value && dhd->in_suspend) { + + /* Kernel suspended */ + DHD_TRACE(("%s: force extra Suspend setting \n", __FUNCTION__)); + + dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, + (char *)&power_mode, sizeof(power_mode)); + + /* Enable packet filter, only allow unicast packet to send up */ + dhd_set_packet_filter(1, dhd); + + /* if dtim skip setup as default force it to wake each thrid dtim + * for better power saving. 
+ * Note that a side effect is the chance to miss BC/MC packets + */ + if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1)) + bcn_li_dtim = 3; + else + bcn_li_dtim = dhd->dtim_skip; + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); +#ifdef CUSTOMER_HW2 + /* Disable built-in roaming so the external supplicant can take over roaming */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); +#endif /* CUSTOMER_HW2 */ + } else { + + /* Kernel resumed */ + DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__)); + + power_mode = PM_FAST; + dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode)); + + /* disable pkt filter */ + dhd_set_packet_filter(0, dhd); + + /* restore pre-suspend setting for dtim_skip */ + bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip, + 4, iovbuf, sizeof(iovbuf)); + + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); +#ifdef CUSTOMER_HW2 + roamvar = dhd_roam; + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); + dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf)); +#endif /* CUSTOMER_HW2 */ + } + } + + return 0; +} + +static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val) +{ + dhd_pub_t *dhdp = &dhd->pub; + + dhd_os_wake_lock(dhdp); + dhd_os_proto_block(dhdp); + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if (!dhdp->suspend_disable_flag) + dhd_set_suspend(val, dhdp); + dhd_os_proto_unblock(dhdp); + dhd_os_wake_unlock(dhdp); +} + +static void dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 1); +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 0); +} +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay.
Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ + tmo->limit = usec; + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = 1000000 / HZ; +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = 1; + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + /* Add the delay that's about to take place */ + tmo->elapsed += tmo->increment; + + if (tmo->increment < tmo->tick) { + OSL_DELAY(tmo->increment); + tmo->increment *= 2; + if (tmo->increment > tmo->tick) + tmo->increment = tmo->tick; + } else { + wait_queue_head_t delay_wait; + DECLARE_WAITQUEUE(wait, current); + int pending; + init_waitqueue_head(&delay_wait); + add_wait_queue(&delay_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + pending = signal_pending(current); + remove_wait_queue(&delay_wait, &wait); + set_current_state(TASK_RUNNING); + if (pending) + return 1; /* Interrupted */ + } + + return 0; +} + +static int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + ASSERT(dhd); + while (i < DHD_MAX_IFS) { + if (dhd->iflist[i] && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return "<if_bad>"; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return "<if_null>"; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return "<if_none>"; +} + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; + + netif_addr_lock_bh(dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif + netif_addr_unlock_bh(dev); + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; + + /* Send down the multicast list first. 
*/ + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strcpy(bufp, "mcast_list"); + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + + netif_addr_lock_bh(dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + netdev_for_each_mc_addr(ha, dev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#else + for (mclist = dev->mc_list;(mclist && (cnt > 0)); cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif + netif_addr_unlock_bh(dev); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + buflen = sizeof("allmulti") + sizeof(allmulti); + if (!(buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); + return; + } + allmulti = htol32(allmulti); + + if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { + DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", + dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); + MFREE(dhd->pub.osh, buf, buflen); + return; + } + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + + allmulti = (dev->flags & IFF_PROMISC) ? 
TRUE : FALSE; + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +static int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr) +{ + char buf[32]; + wl_ioctl_t ioc; + int ret; + + DHD_TRACE(("%s enter\n", __FUNCTION__)); + if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { + DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); + return -1; + } + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = 32; + ioc.set = TRUE; + + ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + } + + return ret; +} + +#ifdef SOFTAP +extern struct net_device *ap_net_dev; +#endif + +static void +dhd_op_if(dhd_if_t *ifp) +{ + dhd_info_t *dhd; + int ret = 0, err = 0; + + ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */ + + dhd = ifp->info; + + DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state)); + + switch (ifp->state) { + case WLC_E_IF_ADD: + /* + * Delete the existing interface before overwriting it + * in case we missed the WLC_E_IF_DEL event. + */ + if (ifp->net != NULL) { + DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n", + __FUNCTION__, ifp->net->name)); + netif_stop_queue(ifp->net); + unregister_netdev(ifp->net); + free_netdev(ifp->net); + } + /* Allocate etherdev, including space for private structure */ + if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) { + DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + ret = -ENOMEM; + } + if (ret == 0) { + strcpy(ifp->net->name, ifp->name); + memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd)); + if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) { + DHD_ERROR(("%s: dhd_net_attach failed, err %d\n", + __FUNCTION__, err)); + ret = -EOPNOTSUPP; + } else { +#ifdef SOFTAP + /* semaphore that the soft AP CODE waits on */ + extern struct semaphore ap_eth_sema; + + /* save ptr to wl0.1 netdev for use in wl_iw.c */ + ap_net_dev = ifp->net; + /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */ + up(&ap_eth_sema); +#endif + DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n", + current->pid, ifp->net->name)); + ifp->state = 0; + } + } + break; + case WLC_E_IF_DEL: + if (ifp->net != NULL) { + DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n", __FUNCTION__)); + netif_stop_queue(ifp->net); + unregister_netdev(ifp->net); + ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */ + } + break; + default: + DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state)); + ASSERT(!ifp->state); + break; + } + + if (ret < 0) { + if (ifp->net) { + free_netdev(ifp->net); + } + dhd->iflist[ifp->idx] = NULL; + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); +#ifdef SOFTAP + if (ifp->net == ap_net_dev) + ap_net_dev = NULL; /* NULL SOFTAP global as well */ +#endif /* SOFTAP */ + } +} + +static int +_dhd_sysioc_thread(void *data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + int i; +#ifdef SOFTAP + bool in_ap = FALSE; +#endif + + DAEMONIZE("dhd_sysioc"); + + while (down_interruptible(&dhd->sysioc_sem) == 0) { + 
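/* Each up() on sysioc_sem (from dhd_set_mac_address(), dhd_set_multicast_list(), dhd_add_if() or dhd_del_if()) runs one pass of the deferred per-interface work below under a wake lock. */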
dhd_os_wake_lock(&dhd->pub); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { +#ifdef SOFTAP + in_ap = (ap_net_dev != NULL); +#endif /* SOFTAP */ + if (dhd->iflist[i]->state) + dhd_op_if(dhd->iflist[i]); +#ifdef SOFTAP + if (dhd->iflist[i] == NULL) { + DHD_TRACE(("%s: interface %d just been removed!\n\n", __FUNCTION__, i)); + continue; + } + + if (in_ap && dhd->set_macaddress) { + DHD_TRACE(("attempt to set MAC for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name)); + dhd->set_macaddress = FALSE; + continue; + } + + if (in_ap && dhd->set_multicast) { + DHD_TRACE(("attempt to set MULTICAST list for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name)); + dhd->set_multicast = FALSE; + continue; + } +#endif /* SOFTAP */ + if (dhd->set_multicast) { + dhd->set_multicast = FALSE; + _dhd_set_multicast_list(dhd, i); + } + if (dhd->set_macaddress) { + dhd->set_macaddress = FALSE; + _dhd_set_mac_address(dhd, i, &dhd->macvalue); + } + } + } + dhd_os_wake_unlock(&dhd->pub); + } + complete_and_exit(&dhd->sysioc_exited, 0); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return -1; + + ASSERT(dhd->sysioc_pid >= 0); + memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN); + dhd->set_macaddress = TRUE; + up(&dhd->sysioc_sem); + + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + ASSERT(dhd->sysioc_pid >= 0); + dhd->set_multicast = TRUE; + up(&dhd->sysioc_sem); +} + +int +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + return -ENODEV; + } + + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + struct ether_header *eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) + atomic_inc(&dhd->pend_8021x_cnt); + } + + /* Look into the packet and update the packet priority */ + if ((PKTPRIO(pktbuf) == 0)) + pktsetprio(pktbuf, FALSE); + + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + + /* Use bus module to send data frame */ +#ifdef BCMDBUS + ret = dbus_send_pkt(dhdp->dbus, pktbuf, NULL /* pktinfo */); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMDBUS */ + + return ret; +} + +static int +dhd_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + int ret; + void *pktbuf; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + int ifidx; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_wake_lock(&dhd->pub); + + /* Reject if down */ + if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + netif_stop_queue(net); + /* Send Event when bus down detected during data session */ + if (dhd->pub.busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__)); + net_os_send_hang_message(net); + } + dhd_os_wake_unlock(&dhd->pub); + return -ENODEV; + } + + ifidx = 
dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); + netif_stop_queue(net); + dhd_os_wake_unlock(&dhd->pub); + return -ENODEV; + } + + /* Make sure there's enough room for any header */ + if (skb_headroom(skb) < dhd->pub.hdrlen) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen); + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + } + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } + + ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + +done: + if (ret) + dhd->pub.dstats.tx_dropped++; + else + dhd->pub.tx_packets++; + + dhd_os_wake_unlock(&dhd->pub); + + /* Return ok: we always eat the packet */ + return 0; +} + +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhdp->txoff = state; + ASSERT(dhd && dhd->iflist[ifidx]); + net = dhd->iflist[ifidx]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); +} + +void +dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + uchar *eth; + uint len; + void * data, *pnext, *save_pktbuf; + int i; + dhd_if_t *ifp; + wl_event_msg_t event; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + save_pktbuf = pktbuf; + + for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { + + pnext = PKTNEXT(dhdp->osh, pktbuf); + PKTSETNEXT(dhdp->osh, pktbuf, NULL); + + + skb = PKTTONATIVE(dhdp->osh, pktbuf); + + /* Get the protocol, maintain skb around eth_type_trans() + * The main reason for this hack is the limitation of + * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' + * to perform skb_pull inside vs ETH_HLEN. To avoid + * copying of the packet coming from the network stack to add + * BDC, Hardware header etc, during network interface registration + * we set the 'net->hard_header_len' to ETH_HLEN + extra space required + * for BDC, Hardware header etc.
and not just the ETH_HLEN + */ + eth = skb->data; + len = skb->len; + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + + if (skb->pkt_type == PACKET_MULTICAST) { + dhd->pub.rx_multicast++; + } + + skb->data = eth; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* Process special event packets and then discard them */ + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) + dhd_wl_host_event(dhd, &ifidx, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + skb->mac_header, +#else + skb->mac.raw, +#endif + &event, + &data); + + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state) + ifp = dhd->iflist[ifidx]; + + if (ifp->net) + ifp->net->last_rx = jiffies; + + dhdp->dstats.rx_bytes += skb->len; + dhdp->rx_packets++; /* Local count */ + + if (in_interrupt()) { + netif_rx(skb); + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + dhd_os_wake_lock_timeout_enable(dhdp); +} + +void +dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) +{ + /* Linux version has nothing to do */ + return; +} + +void +dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) +{ + uint ifidx; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + dhd_prot_hdrpull(dhdp, &ifidx, txp); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + + if (type == ETHER_TYPE_802_1X) + atomic_dec(&dhd->pend_8021x_cnt); + +} + +static struct net_device_stats * +dhd_get_stats(struct net_device *net) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + dhd_if_t *ifp; + int ifidx; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) + return NULL; + + ifp = dhd->iflist[ifidx]; + ASSERT(dhd && ifp); + + if (dhd->pub.up) { + /* Use the protocol to get dongle stats */ + dhd_prot_dstats(&dhd->pub); + } + + /* Copy dongle stats to net device stats */ + ifp->stats.rx_packets = dhd->pub.dstats.rx_packets; + ifp->stats.tx_packets = dhd->pub.dstats.tx_packets; + ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes; + ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes; + ifp->stats.rx_errors = dhd->pub.dstats.rx_errors; + ifp->stats.tx_errors = dhd->pub.dstats.tx_errors; + ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped; + ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped; + ifp->stats.multicast = dhd->pub.dstats.multicast; + + return &ifp->stats; +} + +static int +dhd_watchdog_thread(void *data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ +#ifdef DHD_SCHED + if (dhd_watchdog_prio > 0) { + struct sched_param param; + param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? 
+ dhd_watchdog_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } +#endif /* DHD_SCHED */ + + DAEMONIZE("dhd_watchdog"); + + /* Run until signal received */ + while (1) { + if (down_interruptible (&dhd->watchdog_sem) == 0) { + + if (dhd->pub.dongle_reset == FALSE) { + /* Call the bus module watchdog */ + dhd_bus_watchdog(&dhd->pub); + } + /* Count the tick for reference */ + dhd->pub.tickcnt++; + + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + + dhd_os_wake_unlock(&dhd->pub); + } + else + break; + } + + complete_and_exit(&dhd->watchdog_exited, 0); +} + +static void +dhd_watchdog(ulong data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + dhd_os_wake_lock(&dhd->pub); + if (dhd->watchdog_pid >= 0) { + up(&dhd->watchdog_sem); + return; + } + + /* Call the bus module watchdog */ + dhd_bus_watchdog(&dhd->pub); + + /* Count the tick for reference */ + dhd->pub.tickcnt++; + + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + dhd_os_wake_unlock(&dhd->pub); +} + +static int +dhd_dpc_thread(void *data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ +#ifdef DHD_SCHED + if (dhd_dpc_prio > 0) + { + struct sched_param param; + param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } +#endif /* DHD_SCHED */ + + DAEMONIZE("dhd_dpc"); + + /* Run until signal received */ + while (1) { + if (down_interruptible(&dhd->dpc_sem) == 0) { + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + if (dhd_bus_dpc(dhd->pub.bus)) { + up(&dhd->dpc_sem); + } + else { + dhd_os_wake_unlock(&dhd->pub); + } + } else { + dhd_bus_stop(dhd->pub.bus, TRUE); + dhd_os_wake_unlock(&dhd->pub); + } + } + else + break; + } + + complete_and_exit(&dhd->dpc_exited, 0); +} + +static void +dhd_dpc(ulong data) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)data; + + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + if (dhd_bus_dpc(dhd->pub.bus)) + tasklet_schedule(&dhd->tasklet); + } else { + dhd_bus_stop(dhd->pub.bus, TRUE); + } +} + +void +dhd_sched_dpc(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + dhd_os_wake_lock(dhdp); + if (dhd->dpc_pid >= 0) { + up(&dhd->dpc_sem); + return; + } + + tasklet_schedule(&dhd->tasklet); +} + +#ifdef TOE +/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ +static int +dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strcpy(buf, "toe_ol"); + if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + /* Check for older dongle image that doesn't support toe_ol */ + if (ret == -EIO) { + DHD_ERROR(("%s: toe not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + + DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + memcpy(toe_ol, buf, sizeof(uint32)); + return 0; +} + +/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ +static int +dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) +{ + 
wl_ioctl_t ioc; + char buf[32]; + int toe, ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = TRUE; + + /* Set toe_ol as requested */ + + strcpy(buf, "toe_ol"); + memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); + + if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + /* Enable toe globally only if any components are enabled. */ + + toe = (toe_ol != 0); + + strcpy(buf, "toe"); + memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); + + if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + return 0; +} +#endif /* TOE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void dhd_ethtool_get_drvinfo(struct net_device *net, + struct ethtool_drvinfo *info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + + sprintf(info->driver, "wl"); + sprintf(info->version, "%lu", dhd->pub.drv_version); +} + +struct ethtool_ops dhd_ethtool_ops = { + .get_drvinfo = dhd_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) +static int +dhd_ethtool(dhd_info_t *dhd, void *uaddr) +{ + struct ethtool_drvinfo info; + char drvname[sizeof(info.driver)]; + uint32 cmd; +#ifdef TOE + struct ethtool_value edata; + uint32 toe_cmpnt, csum_dir; + int ret; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* all ethtool calls start with a cmd word */ + if (copy_from_user(&cmd, uaddr, sizeof (uint32))) + return -EFAULT; + + switch (cmd) { + case ETHTOOL_GDRVINFO: + /* Copy out any request driver name */ + if (copy_from_user(&info, uaddr, sizeof(info))) + return -EFAULT; + strncpy(drvname, info.driver, sizeof(info.driver)); + drvname[sizeof(info.driver)-1] = '\0'; + + /* clear struct for return */ + memset(&info, 0, sizeof(info)); + info.cmd = cmd; + + /* if dhd requested, identify ourselves */ + if (strcmp(drvname, "?dhd") == 0) { + sprintf(info.driver, "dhd"); + strcpy(info.version, EPI_VERSION_STR); + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* finally, report dongle driver type */ + else if (dhd->pub.iswl) + sprintf(info.driver, "wl"); + else + sprintf(info.driver, "xx"); + + sprintf(info.version, "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? 
TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + dhd_ioctl_t ioc; + int bcmerror = 0; + int buflen = 0; + void *buf = NULL; + uint driver = 0; + int ifidx; + bool is_set_key_cmd; + int ret; + + dhd_os_wake_lock(&dhd->pub); + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + + if (ifidx == DHD_BAD_IF) { + dhd_os_wake_unlock(&dhd->pub); + return -1; + } + +#if defined(CONFIG_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + dhd_os_wake_unlock(&dhd->pub); + return ret; + } +#endif /* defined(CONFIG_WIRELESS_EXT) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + dhd_os_wake_unlock(&dhd->pub); + return ret; + } +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + + if (cmd != SIOCDEVPRIVATE) { + dhd_os_wake_unlock(&dhd->pub); + return -EOPNOTSUPP; + } + + memset(&ioc, 0, sizeof(ioc)); + + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = -BCME_BADADDR; + goto done; + } + + /* Copy out any buffer passed */ + if (ioc.buf) { + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); + /* optimization for direct ioctl calls from kernel */ + /* + if (segment_eq(get_fs(), KERNEL_DS)) { + buf = ioc.buf; + } else { + */ + { + if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) { + bcmerror = -BCME_NOMEM; + goto done; + } + if (copy_from_user(buf, ioc.buf, buflen)) { + bcmerror = -BCME_BADADDR; + goto done; + } + } + } + + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = -BCME_BADADDR; + goto done; + } + + if (!capable(CAP_NET_ADMIN)) { + bcmerror = -BCME_EPERM; + goto done; + } + + /* check for local dhd ioctl and handle it */ + if (driver == DHD_IOCTL_MAGIC) { + bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen); + if (bcmerror) + dhd->pub.bcmerror = bcmerror; + goto done; + } + + /* send to dongle (must be up, and wl) */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s DONGLE_DOWN\n", __FUNCTION__)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + if (!dhd->pub.iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + /* Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption. 
+ */ + is_set_key_cmd = ((ioc.cmd == WLC_SET_KEY) || + ((ioc.cmd == WLC_SET_VAR) && + !(strncmp("wsec_key", ioc.buf, 9))) || + ((ioc.cmd == WLC_SET_VAR) && + !(strncmp("bsscfg:wsec_key", ioc.buf, 15)))); + if (is_set_key_cmd) { + dhd_wait_pend8021x(net); + } + + bcmerror = dhd_prot_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen); + +done: + if ((bcmerror == -ETIMEDOUT) || ((dhd->pub.busstate == DHD_BUS_DOWN) && + (!dhd->pub.dongle_reset))) { + DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__)); + net_os_send_hang_message(net); + } + + if (!bcmerror && buf && ioc.buf) { + if (copy_to_user(ioc.buf, buf, buflen)) + bcmerror = -EFAULT; + } + + if (buf) + MFREE(dhd->pub.osh, buf, buflen); + + dhd_os_wake_unlock(&dhd->pub); + + return OSL_ERROR(bcmerror); +} + +static int +dhd_stop(struct net_device *net) +{ +#if !defined(IGNORE_ETH0_DOWN) + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (dhd->pub.up == 0) { + return 0; + } + + /* Set state and stop OS transmissions */ + dhd->pub.up = 0; + netif_stop_queue(net); +#else + DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation ...\n", __FUNCTION__)); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + + OLD_MOD_DEC_USE_COUNT; + return 0; +} + +static int +dhd_open(struct net_device *net) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); +#ifdef TOE + uint32 toe_ol; +#endif + int ifidx; + + wl_control_wl_start(net); /* start if needed */ + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if ((dhd->iflist[ifidx]) && (dhd->iflist[ifidx]->state == WLC_E_IF_DEL)) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + return -1; + } + + + if (ifidx == 0) { /* do it only for primary eth0 */ + + atomic_set(&dhd->pend_8021x_cnt, 0); + + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; +#endif + } + /* Allow transmit calls */ + netif_start_queue(net); + dhd->pub.up = 1; + + OLD_MOD_INC_USE_COUNT; + return 0; +} + +osl_t * +dhd_osl_attach(void *pdev, uint bustype) +{ + return osl_attach(pdev, bustype, TRUE); +} + +void +dhd_osl_detach(osl_t *osh) +{ + if (MALLOCED(osh)) { + DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh))); + } + osl_detach(osh); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1 + up(&dhd_registration_sem); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +int +dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name, + uint8 *mac_addr, uint32 flags, uint8 bssidx) +{ + dhd_if_t *ifp; + + DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle)); + + ASSERT(dhd && (ifidx < DHD_MAX_IFS)); + + ifp = dhd->iflist[ifidx]; + if (!ifp && !(ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t)))) { + DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__)); + return -ENOMEM; + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhd; + dhd->iflist[ifidx] = ifp; + strncpy(ifp->name, name, IFNAMSIZ); + ifp->name[IFNAMSIZ] = '\0'; + if (mac_addr != NULL) + memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN); + + if (handle == NULL) { + ifp->state = WLC_E_IF_ADD; + ifp->idx = ifidx; + ASSERT(dhd->sysioc_pid >= 0); + up(&dhd->sysioc_sem); + } else + ifp->net = (struct net_device *)handle; + + return 0; +} 
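The interface add path above is deliberately deferred: dhd_add_if() with a NULL handle only records WLC_E_IF_ADD and wakes the sysioc thread, which later calls dhd_op_if() to allocate and register the net_device; dhd_del_if() below marks WLC_E_IF_DEL for the same thread to tear it down. A minimal usage sketch, assuming hypothetical event-dispatch plumbing around it (only dhd_add_if()/dhd_del_if() and the WLC_E_IF_* codes come from this file; the caller and its arguments are illustrative):

	/* Illustrative only: a hypothetical hook on the dongle event path */
	static void example_if_event(dhd_info_t *dhd, int ifidx, int opcode, char *name, uint8 *mac)
	{
		if (opcode == WLC_E_IF_ADD)
			dhd_add_if(dhd, ifidx, NULL, name, mac, 0, 0);	/* deferred create via sysioc thread */
		else if (opcode == WLC_E_IF_DEL)
			dhd_del_if(dhd, ifidx);	/* deferred teardown via sysioc thread */
	}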
+ +void +dhd_del_if(dhd_info_t *dhd, int ifidx) +{ + dhd_if_t *ifp; + + DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS)); + ifp = dhd->iflist[ifidx]; + if (!ifp) { + DHD_ERROR(("%s: Null interface\n", __FUNCTION__)); + return; + } + + ifp->state = WLC_E_IF_DEL; + ifp->idx = ifidx; + ASSERT(dhd->sysioc_pid >= 0); + up(&dhd->sysioc_sem); +} + +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) +{ + dhd_info_t *dhd = NULL; + struct net_device *net; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + /* updates firmware nvram path if it was provided as module paramters */ + if ((firmware_path != NULL) && (firmware_path[0] != '\0')) + strcpy(fw_path, firmware_path); + if ((nvram_path != NULL) && (nvram_path[0] != '\0')) + strcpy(nv_path, nvram_path); + + /* Allocate etherdev, including space for private structure */ + if (!(net = alloc_etherdev(sizeof(dhd)))) { + DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + goto fail; + } + + /* Allocate primary dhd_info */ + if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) { + DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); + goto fail; + } + + memset(dhd, 0, sizeof(dhd_info_t)); + + /* + * Save the dhd_info into the priv + */ + memcpy(netdev_priv(net), &dhd, sizeof(dhd)); + dhd->pub.osh = osh; + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strncpy(net->name, iface_name, IFNAMSIZ); + net->name[IFNAMSIZ - 1] = 0; + len = strlen(net->name); + ch = net->name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) + strcat(net->name, "%d"); + } + + if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF) + goto fail; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + + init_MUTEX(&dhd->proto_sem); + /* Initialize other structure content */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->ctrl_wait); + + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + + /* Initialize Wakelock stuff */ + spin_lock_init(&dhd->wl_lock); + dhd->wl_count = 0; + dhd->wl_packet = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); +#endif + + /* Link to info module */ + dhd->pub.info = dhd; + + /* Link to bus module */ + dhd->pub.bus = bus; + dhd->pub.hdrlen = bus_hdrlen; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } +#if defined(CONFIG_WIRELESS_EXT) + /* Attach and link in the iw */ + if (wl_iw_attach(net, (void *)&dhd->pub) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } +#endif /* defined(CONFIG_WIRELESS_EXT) */ + + /* Set up the watchdog timer */ + init_timer(&dhd->timer); + dhd->timer.data = (ulong)dhd; + dhd->timer.function = dhd_watchdog; + + /* Initialize thread based operation and lock */ + init_MUTEX(&dhd->sdsem); + if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) { + dhd->threads_only = TRUE; + } + else { + dhd->threads_only = FALSE; + } + + if (dhd_dpc_prio >= 0) { + /* Initialize watchdog thread */ + sema_init(&dhd->watchdog_sem, 0); + init_completion(&dhd->watchdog_exited); + dhd->watchdog_pid = kernel_thread(dhd_watchdog_thread, dhd, 0); + } else { + dhd->watchdog_pid = -1; + } + + /* Set up the bottom half handler */ 
+ if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + sema_init(&dhd->dpc_sem, 0); + init_completion(&dhd->dpc_exited); + dhd->dpc_pid = kernel_thread(dhd_dpc_thread, dhd, 0); + } else { + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->dpc_pid = -1; + } + + if (dhd_sysioc) { + sema_init(&dhd->sysioc_sem, 0); + init_completion(&dhd->sysioc_exited); + dhd->sysioc_pid = kernel_thread(_dhd_sysioc_thread, dhd, 0); + } else { + dhd->sysioc_pid = -1; + } + + /* + * Save the dhd_info into the priv + */ + memcpy(netdev_priv(net), &dhd, sizeof(dhd)); + +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) + g_bus = bus; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + register_pm_notifier(&dhd_sleep_pm_notifier); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#ifdef CONFIG_HAS_EARLYSUSPEND + dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; + dhd->early_suspend.suspend = dhd_early_suspend; + dhd->early_suspend.resume = dhd_late_resume; + register_early_suspend(&dhd->early_suspend); +#endif + + return &dhd->pub; + +fail: + if (net) + free_netdev(net); + if (dhd) + dhd_detach(&dhd->pub); + + return NULL; +} + + +int +dhd_bus_start(dhd_pub_t *dhdp) +{ + int ret = -1; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; +#ifdef EMBEDDED_PLATFORM + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ +#endif /* EMBEDDED_PLATFORM */ + + ASSERT(dhd); + + DHD_TRACE(("%s: \n", __FUNCTION__)); + + /* try to download image and nvram to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DOWN) { + if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, + fw_path, nv_path))) { + DHD_ERROR(("%s: dhdsdio_probe_download failed. 
firmware = %s nvram = %s\n", + __FUNCTION__, fw_path, nv_path)); + return -1; + } + } + + /* Start the watchdog timer */ + dhd->pub.tickcnt = 0; + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + + /* Bring up the bus */ + if ((ret = dhd_bus_init(&dhd->pub, TRUE)) != 0) { + DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); + return ret; + } +#if defined(OOB_INTR_ONLY) + /* Host registration for OOB interrupt */ + if (bcmsdh_register_oob_intr(dhdp)) { + del_timer_sync(&dhd->timer); + dhd->wd_timer_valid = FALSE; + DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__)); + return -ENODEV; + } + + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + + /* If bus is not ready, can't come up */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + del_timer_sync(&dhd->timer); + dhd->wd_timer_valid = FALSE; + DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + return -ENODEV; + } + +#ifdef EMBEDDED_PLATFORM + bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf)); + bcopy(iovbuf, dhdp->eventmask, WL_EVENTING_MASK_LEN); + + setbit(dhdp->eventmask, WLC_E_SET_SSID); + setbit(dhdp->eventmask, WLC_E_PRUNE); + setbit(dhdp->eventmask, WLC_E_AUTH); + setbit(dhdp->eventmask, WLC_E_REASSOC); + setbit(dhdp->eventmask, WLC_E_REASSOC_IND); + setbit(dhdp->eventmask, WLC_E_DEAUTH_IND); + setbit(dhdp->eventmask, WLC_E_DISASSOC_IND); + setbit(dhdp->eventmask, WLC_E_DISASSOC); + setbit(dhdp->eventmask, WLC_E_JOIN); + setbit(dhdp->eventmask, WLC_E_ASSOC_IND); + setbit(dhdp->eventmask, WLC_E_PSK_SUP); + setbit(dhdp->eventmask, WLC_E_LINK); + setbit(dhdp->eventmask, WLC_E_NDIS_LINK); + setbit(dhdp->eventmask, WLC_E_MIC_ERROR); + setbit(dhdp->eventmask, WLC_E_PMKID_CACHE); + setbit(dhdp->eventmask, WLC_E_TXFAIL); + setbit(dhdp->eventmask, WLC_E_JOIN_START); + setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE); +#ifdef PNO_SUPPORT + setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND); +#endif /* PNO_SUPPORT */ + +/* enable dongle roaming event */ + setbit(dhdp->eventmask, WLC_E_ROAM); + + dhdp->pktfilter_count = 1; + /* Setup filter to allow only unicast */ + dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00"; +#endif /* EMBEDDED_PLATFORM */ + + /* Bus is ready, do any protocol initialization */ + if ((ret = dhd_prot_init(&dhd->pub)) < 0) + return ret; + + return 0; +} + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set) +{ + char buf[strlen(name) + 1 + cmd_len]; + int len = sizeof(buf); + wl_ioctl_t ioc; + int ret; + + len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = set? 
WLC_SET_VAR : WLC_GET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = set; + + ret = dhd_prot_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + if (!set && ret >= 0) + memcpy(cmd_buf, buf, cmd_len); + + return ret; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_open, + .ndo_stop = dhd_stop, + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, + .ndo_set_multicast_list = dhd_set_multicast_list, +}; + +static struct net_device_ops dhd_ops_virt = { + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, + .ndo_set_multicast_list = dhd_set_multicast_list, +}; +#endif + +int +dhd_net_attach(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct net_device *net; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; + + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && dhd->iflist[ifidx]); + net = dhd->iflist[ifidx]->net; + + ASSERT(net); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->get_stats = dhd_get_stats; + net->do_ioctl = dhd_ioctl_entry; + net->hard_start_xmit = dhd_start_xmit; + net->set_mac_address = dhd_set_mac_address; + net->set_multicast_list = dhd_set_multicast_list; + net->open = net->stop = NULL; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &dhd_ops_virt; +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + net->open = dhd_open; + net->stop = dhd_stop; +#else + net->netdev_ops = &dhd_ops_pri; +#endif + + /* + * We have to use the primary MAC for virtual interfaces + */ + if (ifidx != 0) { + /* for virtual interfaces use the primary MAC */ + memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + } + + if (ifidx == 1) { + DHD_TRACE(("%s ACCESS POINT MAC: \n", __FUNCTION__)); + /* ACCESSPOINT INTERFACE CASE */ + temp_addr[0] |= 0x02; /* set bit 2 , - Locally Administered address */ + } + net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &dhd_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(CONFIG_WIRELESS_EXT) +#if WIRELESS_EXT < 19 + net->get_wireless_stats = dhd_get_wireless_stats; +#endif /* WIRELESS_EXT < 19 */ +#if WIRELESS_EXT > 12 + net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ +#endif /* defined(CONFIG_WIRELESS_EXT) */ + + dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen; + + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + if (register_netdev(net) != 0) { + DHD_ERROR(("%s: couldn't register the net device\n", __FUNCTION__)); + goto fail; + } + + printf("%s: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", net->name, + dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2], + dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]); + +#if defined(CONFIG_WIRELESS_EXT) +#ifdef SOFTAP + if (ifidx == 0) + /* Don't call for SOFTAP Interface in SOFTAP MODE */ + wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#else + wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#endif /* SOFTAP */ +#endif /* CONFIG_WIRELESS_EXT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + up(&dhd_registration_sem); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ 
+ return 0; + +fail: +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + return BCME_ERROR; +} + +void +dhd_bus_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + /* Stop the bus module */ + dhd_bus_stop(dhd->pub.bus, TRUE); +#if defined(OOB_INTR_ONLY) + bcmsdh_unregister_oob_intr(); +#endif /* defined(OOB_INTR_ONLY) */ + + /* Clear the watchdog timer */ + del_timer_sync(&dhd->timer); + dhd->wd_timer_valid = FALSE; + } + } +} + +void +dhd_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + dhd_if_t *ifp; + int i; + +#if defined(CONFIG_HAS_EARLYSUSPEND) + if (dhd->early_suspend.suspend) + unregister_early_suspend(&dhd->early_suspend); +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ +#if defined(CONFIG_WIRELESS_EXT) + /* Attach and link in the iw */ + wl_iw_detach(); +#endif + + for (i = 1; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i]) + dhd_del_if(dhd, i); + + if (dhd->sysioc_pid >= 0) { + KILL_PROC(dhd->sysioc_pid, SIGTERM); + wait_for_completion(&dhd->sysioc_exited); + } + + ifp = dhd->iflist[0]; + ASSERT(ifp); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + if (ifp->net->open) { +#else + if (ifp->net->netdev_ops == &dhd_ops_pri) { +#endif + dhd_stop(ifp->net); + unregister_netdev(ifp->net); + } + + if (dhd->watchdog_pid >= 0) + { + KILL_PROC(dhd->watchdog_pid, SIGTERM); + wait_for_completion(&dhd->watchdog_exited); + } + + if (dhd->dpc_pid >= 0) + { + KILL_PROC(dhd->dpc_pid, SIGTERM); + wait_for_completion(&dhd->dpc_exited); + } + else + tasklet_kill(&dhd->tasklet); + + dhd_bus_detach(dhdp); + + if (dhdp->prot) + dhd_prot_detach(dhdp); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + unregister_pm_notifier(&dhd_sleep_pm_notifier); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + free_netdev(ifp->net); +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_destroy(&dhd->wl_wifi); + wake_lock_destroy(&dhd->wl_rxwake); +#endif + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + } + } +} + +static void __exit +dhd_module_cleanup(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_bus_unregister(); +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) + wifi_del_dev(); +#endif + /* Call customer gpio to turn off power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); +} + +static int __init +dhd_module_init(void) +{ + int error; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Sanity check on the module parameters */ + do { + /* Both watchdog and DPC as tasklets are ok */ + if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0)) + break; + + /* If both watchdog and DPC are threads, TX must be deferred */ + if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx) + break; + + DHD_ERROR(("Invalid module parameters.\n")); + return -EINVAL; + } while (0); + + /* Call customer gpio to turn on power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON); + +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) + sema_init(&wifi_control_sem, 0); + + error = wifi_add_dev(); + if (error) { + DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__)); + goto fail_0; + } + + /* 
Waiting callback after platform_driver_register is done or exit with error */ + if (down_timeout(&wifi_control_sem, msecs_to_jiffies(5000)) != 0) { + error = -EINVAL; + DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__)); + goto fail_1; + } +#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + sema_init(&dhd_registration_sem, 0); +#endif + + error = dhd_bus_register(); + + if (!error) + printf("\n%s\n", dhd_version); + else { + DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); + goto fail_1; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + /* + * Wait till MMC sdio_register_driver callback called and made driver attach. + * It's needed to make sync up exit from dhd insmod and + * Kernel MMC sdio device callback registration + */ + if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) { + error = -EINVAL; + DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__)); + goto fail_2; + } +#endif + return error; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +fail_2: + dhd_bus_unregister(); +#endif +fail_1: +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) + wifi_del_dev(); +fail_0: +#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */ + + /* Call customer gpio to turn off power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); + + return error; +} + +module_init(dhd_module_init); +module_exit(dhd_module_cleanup); + +/* + * OS specific functions required to implement DHD driver in OS independent way + */ +int +dhd_os_proto_block(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + down(&dhd->proto_sem); + return 1; + } + + return 0; +} + +int +dhd_os_proto_unblock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + up(&dhd->proto_sem); + return 1; + } + + return 0; +} + +unsigned int +dhd_os_get_ioctl_resp_timeout(void) +{ + return ((unsigned int)dhd_ioctl_timeout_msec); +} + +void +dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) +{ + dhd_ioctl_timeout_msec = (int)timeout_msec; +} + +int +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + DECLARE_WAITQUEUE(wait, current); + int timeout = dhd_ioctl_timeout_msec; + + /* Convert timeout in millsecond to jiffies */ + /* timeout = timeout * HZ / 1000; */ + timeout = msecs_to_jiffies(timeout); + + /* Wait until control frame is available */ + add_wait_queue(&dhd->ioctl_resp_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + smp_mb(); + while (!(*condition) && (!signal_pending(current) && timeout)) { + timeout = schedule_timeout(timeout); + smp_mb(); + } + + if (signal_pending(current)) + *pending = TRUE; + + set_current_state(TASK_RUNNING); + remove_wait_queue(&dhd->ioctl_resp_wait, &wait); + + return timeout; +} + +int +dhd_os_ioctl_resp_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (waitqueue_active(&dhd->ioctl_resp_wait)) { + wake_up_interruptible(&dhd->ioctl_resp_wait); + } + + return 0; +} + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ + dhd_pub_t *pub = bus; + static uint save_dhd_watchdog_ms = 0; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) + return; + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + 
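+		/* wdtick == 0 requests a stop: cancel the pending timer synchronously and mark it invalid. */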
del_timer_sync(&dhd->timer); + dhd->wd_timer_valid = FALSE; + save_dhd_watchdog_ms = wdtick; + return; + } + + if (wdtick) { + dhd_watchdog_ms = (uint)wdtick; + + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); + + dhd->wd_timer_valid = TRUE; + save_dhd_watchdog_ms = wdtick; + } +} + +void * +dhd_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? + */ + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + + rdlen = kernel_read(fp, fp->f_pos, buf, len); + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +dhd_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd->threads_only) + down(&dhd->sdsem); + else + spin_lock_bh(&dhd->sdlock); +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd->threads_only) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->txqlock); +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->txqlock); +} +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdtxlock(dhd_pub_t *pub) +{ + dhd_os_sdlock(pub); +} + +void +dhd_os_sdtxunlock(dhd_pub_t *pub) +{ + dhd_os_sdunlock(pub); +} + +#ifdef DHD_USE_STATIC_BUF +void * dhd_os_prealloc(int section, unsigned long size) +{ +#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) + void *alloc_ptr = NULL; + if (wifi_control_data && wifi_control_data->mem_prealloc) + { + alloc_ptr = wifi_control_data->mem_prealloc(section, size); + if (alloc_ptr) + { + DHD_INFO(("success alloc section %d\n", section)); + bzero(alloc_ptr, size); + return alloc_ptr; + } + } + + DHD_ERROR(("can't alloc section %d\n", section)); + return 0; +#else +return MALLOC(0, size); +#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */ +} +#endif /* DHD_USE_STATIC_BUF */ +#if defined(CONFIG_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; +} +#endif /* defined(CONFIG_WIRELESS_EXT) */ + +static int +dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event, void **data) +{ + int bcmerror = 0; + + ASSERT(dhd != NULL); + + bcmerror = wl_host_event(dhd, ifidx, pktdata, event, data); + if (bcmerror != BCME_OK) + return (bcmerror); + +#if defined(CONFIG_WIRELESS_EXT) + ASSERT(dhd->iflist[*ifidx] != NULL); + + if (ntoh32(event->event_type) == WLC_E_IF) { + DHD_INFO(("<0> interface:%d OP:%d don't pass to wext," + "net_device might not be created yet\n", + *ifidx, ntoh32(event->event_type))); + return bcmerror; + } + + ASSERT(dhd->iflist[*ifidx]->net != NULL); + + if (dhd->iflist[*ifidx]->net) + 
wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); +#endif /* defined(CONFIG_WIRELESS_EXT) */ + + return (bcmerror); +} + +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + default: + break; + } +} + +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + dhd_os_sdunlock(dhd); + wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2); + dhd_os_sdlock(dhd); +#endif + return; +} + +void dhd_wait_event_wakeup(dhd_pub_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up_interruptible(&dhdinfo->ctrl_wait); +#endif + return; +} + +int +dhd_dev_reset(struct net_device *dev, uint8 flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + /* Turning off watchdog */ + if (flag) + dhd_os_wd_timer(&dhd->pub, 0); + + dhd_bus_devreset(&dhd->pub, flag); + + /* Turning on watchdog back */ + if (!flag) + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + DHD_ERROR(("%s: WLAN OFF DONE\n", __FUNCTION__)); + + return 1; +} + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int net_os_set_suspend(struct net_device *dev, int val) +{ + int ret = 0; +#if defined(CONFIG_HAS_EARLYSUSPEND) + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd) { + dhd_os_proto_block(&dhd->pub); + ret = dhd_set_suspend(val, &dhd->pub); + dhd_os_proto_unblock(&dhd->pub); + } +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ + return ret; +} + +int net_os_set_dtim_skip(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd) + dhd->pub.dtim_skip = val; + + return 0; +} + +int net_os_set_packet_filter(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhd && dhd->pub.up) { + dhd_os_proto_block(&dhd->pub); + if (dhd->pub.in_suspend) { + if (!val || (val && !dhd->pub.suspend_disable_flag)) + dhd_set_packet_filter(val, &dhd->pub); + } + dhd_os_proto_unblock(&dhd->pub); + } + return ret; +} + + +void +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + dhd_preinit_ioctls(&dhd->pub); +} + +#ifdef PNO_SUPPORT +/* Linux wrapper to call common dhd_pno_clean */ +int +dhd_dev_pno_reset(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_clean(&dhd->pub)); +} + + +/* Linux wrapper to call common dhd_pno_enable */ +int +dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable(&dhd->pub, pfn_enabled)); +} + + +/* Linux wrapper to call common dhd_pno_set */ +int +dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set(&dhd->pub, ssids_local, nssid, 
scan_fr)); +} + +/* Linux wrapper to get pno status */ +int +dhd_dev_get_pno_status(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_status(&dhd->pub)); +} + +#endif /* PNO_SUPPORT */ + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 10 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int timeout = 10 * HZ / 1000; + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(timeout); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + return pend; +} + +#ifdef DHD_DEBUG +int +write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) +{ + int ret = 0; + struct file *fp; + mm_segment_t old_fs; + loff_t pos = 0; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* open file to write */ + fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640); + if (!fp) { + printf("%s: open file error\n", __FUNCTION__); + ret = -1; + goto exit; + } + + /* Write buf to file */ + fp->f_op->write(fp, buf, size, &pos); + +exit: + /* free buf before return */ + MFREE(dhd->osh, buf, size); + /* close file before return */ + if (fp) + filp_close(fp, current->files); + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wl_lock, flags); + ret = dhd->wl_packet; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wl_packet) + wake_lock_timeout(&dhd->wl_rxwake, HZ); +#endif + dhd->wl_packet = 0; + spin_unlock_irqrestore(&dhd->wl_lock, flags); + } + /* printk("%s: %d\n", __FUNCTION__, ret); */ + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wl_lock, flags); + dhd->wl_packet = 1; + spin_unlock_irqrestore(&dhd->wl_lock, flags); + } + /* printk("%s\n",__func__); */ + return 0; +} + +int net_os_wake_lock_timeout_enable(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout_enable(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wl_lock, flags); +#ifdef CONFIG_HAS_WAKELOCK + if (!dhd->wl_count) + wake_lock(&dhd->wl_wifi); +#endif + dhd->wl_count++; + ret = dhd->wl_count; + spin_unlock_irqrestore(&dhd->wl_lock, flags); + } + /* printk("%s: %d\n", __FUNCTION__, ret); */ + return ret; +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd) { + 
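+		/* Drop one reference under wl_lock; when the count reaches zero the underlying wakelock (if configured) is released. */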
spin_lock_irqsave(&dhd->wl_lock, flags); + if (dhd->wl_count) { + dhd->wl_count--; +#ifdef CONFIG_HAS_WAKELOCK + if (!dhd->wl_count) + wake_unlock(&dhd->wl_wifi); +#endif + ret = dhd->wl_count; + } + spin_unlock_irqrestore(&dhd->wl_lock, flags); + } + /* printk("%s: %d\n", __FUNCTION__, ret); */ + return ret; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) { + if (!dhd->hang_was_sent) { + dhd->hang_was_sent = 1; + ret = wl_iw_send_priv_event(dev, "HANG"); + } + } + return ret; +} diff --git a/drivers/net/wireless/bcm4329/dhd_linux_sched.c b/drivers/net/wireless/bcm4329/dhd_linux_sched.c new file mode 100644 index 000000000000..480b416657ee --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_linux_sched.c @@ -0,0 +1,38 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_linux_sched.c,v 1.1.34.1.6.1 2009/01/16 01:17:40 Exp $ + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linuxver.h> + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} diff --git a/drivers/net/wireless/bcm4329/dhd_proto.h b/drivers/net/wireless/bcm4329/dhd_proto.h new file mode 100644 index 000000000000..7ef6929a5bf7 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_proto.h @@ -0,0 +1,102 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_proto.h,v 1.2.82.1.4.1.16.7 2010/05/10 12:54:59 Exp $ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include <dhdioctl.h> +#include <wlioctl.h> + +#ifndef IOCTL_RESP_TIMEOUT +#define IOCTL_RESP_TIMEOUT 3000 /* In milli second */ +#endif + +#ifndef IOCTL_CHIP_ACTIVE_TIMEOUT +#define IOCTL_CHIP_ACTIVE_TIMEOUT 10 /* In milli second */ +#endif + +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_prot_init(dhd_pub_t *dhdp); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); + +/* Remove any protocol-specific data header. 
*/ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#elif defined(RNDIS) +#define DHD_PROTOCOL "rndis" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + +#endif /* _dhd_proto_h_ */ diff --git a/drivers/net/wireless/bcm4329/dhd_sdio.c b/drivers/net/wireless/bcm4329/dhd_sdio.c new file mode 100644 index 000000000000..ff1bdedbc9f3 --- /dev/null +++ b/drivers/net/wireless/bcm4329/dhd_sdio.c @@ -0,0 +1,5876 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_sdio.c,v 1.157.2.27.2.33.2.129.4.1 2010/09/02 23:13:16 Exp $ + */ + +#include <typedefs.h> +#include <osl.h> +#include <bcmsdh.h> + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#include <bcmdefs.h> +#include <bcmutils.h> +#include <bcmendian.h> +#include <bcmdevs.h> +#include <siutils.h> +#include <hndpmu.h> +#include <hndsoc.h> +#include <sbchipc.h> +#include <sbhnddma.h> +#include <sdio.h> +#include <sbsdio.h> +#include <sbsdpcmdev.h> +#include <bcmsdpcm.h> + +#include <proto/ethernet.h> +#include <proto/802.1d.h> +#include <proto/802.11.h> + +#include <dngl_stats.h> +#include <dhd.h> +#include <dhd_bus.h> +#include <dhd_proto.h> +#include <dhd_dbg.h> +#include <dhdioctl.h> +#include <sdiovar.h> + +#ifdef DHD_DEBUG +#include <hndrte_cons.h> +#endif /* DHD_DEBUG */ +#ifdef DHD_DEBUG_TRAP +#include <hndrte_armtrap.h> +#endif /* DHD_DEBUG_TRAP */ + +#define QLEN 256 /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 + +#define TXRETRIES 2 /* # of retries for tx frames */ + +#if defined(CONFIG_MACH_SANDGATE2G) +#define DHD_RXBOUND 250 /* Default for max rx frames in one scheduling */ +#else +#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */ +#endif /* defined(CONFIG_MACH_SANDGATE2G) */ + +#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */ + +#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */ + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! +#endif + +#ifndef DHD_FIRSTREAD +#define DHD_FIRSTREAD 32 +#endif +#if !ISPOWEROF2(DHD_FIRSTREAD) +#error DHD_FIRSTREAD is not a power of 2! +#endif + +/* Total length of frame header for dongle protocol */ +#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) +#ifdef SDTEST +#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN) +#else +#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN) +#endif + +/* Space for header read, limit for data packets */ +#ifndef MAX_HDR_READ +#define MAX_HDR_READ 32 +#endif +#if !ISPOWEROF2(MAX_HDR_READ) +#error MAX_HDR_READ is not a power of 2! +#endif + +#define MAX_RX_DATASZ 2048 + +/* Maximum milliseconds to wait for F2 to come up */ +#define DHD_WAIT_F2RDY 3000 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#if (PMU_MAX_TRANSITION_DLY < 1000000) +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 +#endif + +/* Value for ChipClockCSR during initial setup */ +#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ) +#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +/* Packet free applicable unconditionally for sdio and sdspi. Conditional if + * bufpool was present for gspi bus. 
+ */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); +extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len); + +#ifdef DHD_DEBUG +/* Device console log buffer state */ +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hndrte_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; +#endif /* DHD_DEBUG */ + +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + const char *nvram_params; /* user specified nvram params. 
*/ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? 
*/ + bool rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ + bool alp_only; /* Don't use HT clock (ALP only) */ + /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ + bool usebufpool; + +#ifdef SDTEST + /* external loopback */ + bool ext_loop; + uint8 loopid; + + /* pktgen configuration */ + uint pktgen_freq; /* Ticks between bursts */ + uint pktgen_count; /* Packets to send each burst */ + uint pktgen_print; /* Bursts between count displays */ + uint pktgen_total; /* Stop after this many */ + uint pktgen_minlen; /* Minimum packet data len */ + uint pktgen_maxlen; /* Maximum packet data len */ + uint pktgen_mode; /* Configured mode: tx, rx, or echo */ + uint pktgen_stop; /* Number of tx failures causing stop */ + + /* active pktgen fields */ + uint pktgen_tick; /* Tick counter for bursts */ + uint pktgen_ptick; /* Burst counter for printing */ + uint pktgen_sent; /* Number of test packets generated */ + uint pktgen_rcvd; /* Number of test packets received */ + uint pktgen_fail; /* Number of failed send attempts */ + uint16 pktgen_len; /* Length of next packet to send */ +#endif /* SDTEST */ + + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) */ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + + uint8 *ctrl_frame_buf; + uint32 ctrl_frame_len; + bool ctrl_frame_stat; +} dhd_bus_t; + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#define DHD_NOPMU(dhd) (FALSE) + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; +extern void dhd_os_wd_timer(void *bus, uint wdtick); + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax; + +/* override the RAM size if possible */ +#define DONGLE_MIN_MEMSIZE (128 *1024) +int dhd_dongle_memsize; + +static bool dhd_doflow; +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +static const uint watermark = 8; +static const uint firstread = DHD_FIRSTREAD; + +#define HDATLEN (firstread - (SDPCM_HDRLEN)) + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +#define ALIGNMENT 4 + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern void 
bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uint datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) - datalign; \ + ASSERT(datalign < (align)); \ + ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \ + if (datalign) \ + PKTPULL((osh), (p), datalign); \ + PKTSETLEN((osh), (p), (len)); \ + } while (0) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +/* Try doing readahead */ +static bool dhd_readahead; + + +/* To check if there's window offered */ +#define DATAOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* Macros to get register read/write status */ +/* NOTE: these assume a local dhdsdio_bus_t *bus! */ +#define R_SDREG(regvar, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + regvar = R_REG(bus->dhd->osh, regaddr); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) { \ + DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + regvar = 0; \ + } \ + } \ +} while (0) + +#define W_SDREG(regval, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + W_REG(bus->dhd->osh, regaddr, regval); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) \ + DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + } \ +} while (0) + + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start); +#endif + +#ifdef DHD_DEBUG_TRAP +static int dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size); +static int dhdsdio_mem_dump(dhd_bus_t *bus); +#endif /* DHD_DEBUG_TRAP */ +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh); + +static uint process_nvram_vars(char *varbuf, uint len); + +static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); + +static bool dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(struct dhd_bus *bus); + +static int dhdsdio_download_code_file(struct 
dhd_bus *bus, char *image_path); +static int dhdsdio_download_nvram(struct dhd_bus *bus); +#ifdef BCMEMBEDIMAGE +static int dhdsdio_download_code_array(struct dhd_bus *bus); +#endif + + +static void +dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_memsize, min_size)); + if ((dhd_dongle_memsize > min_size) && + (dhd_dongle_memsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_memsize; +} + +static int +dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address) +{ + int err = 0; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + return err; +} + + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#if defined(OOB_INTR_ONLY) + pendok = FALSE; +#endif + clkctl = 0; + sdh = bus->sdh; + + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + + if ((bus->sih->chip == BCM4329_CHIP_ID) && (bus->sih->chiprev == 0)) + clkreq |= SBSDIO_FORCE_ALP; + + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + + if (pendok && + ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) { + uint32 dummy, retries; + R_SDREG(dummy, &bus->regs->clockctlstatus, retries); + } + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, 
clkctl)); + return BCME_ERROR; + } + + + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + bus->clkstate = CLK_SDONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + DHD_INFO(("CLKCTL: turned OFF\n")); + if (err) { + DHD_ERROR(("%s: Failed access turning clock off: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + return BCME_OK; +} + +/* Change idle/active SD state */ +static int +dhdsdio_sdclk(dhd_bus_t *bus, bool on) +{ + int err; + int32 iovalue; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (on) { + if (bus->idleclock == DHD_IDLE_STOP) { + /* Turn on clock and restore mode */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error enabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + iovalue = bus->sd_mode; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Restore clock speed */ + iovalue = bus->sd_divisor; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error restoring sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_SDONLY; + } else { + /* Stop or slow the SD clock itself */ + if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) { + DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n", + __FUNCTION__, bus->sd_divisor, bus->sd_mode)); + return BCME_ERROR; + } + if (bus->idleclock == DHD_IDLE_STOP) { + if (sd1idle) { + /* Change to SD1 mode and turn off clock */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + + iovalue = 0; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error disabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", 
__FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; + } + return BCME_OK; + } + + switch (target) { + case CLK_AVAIL: + /* Make sure SD clock is available */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + /* Now request HT Avail on the backplane */ + dhdsdio_htclk(bus, TRUE, pendok); + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; + break; + + case CLK_SDONLY: + /* Remove HT request, or bring up SD clock */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + else if (bus->clkstate == CLK_AVAIL) + dhdsdio_htclk(bus, FALSE, FALSE); + else + DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n", + bus->clkstate, target)); + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + break; + + case CLK_NONE: + /* Make sure to remove HT request */ + if (bus->clkstate == CLK_AVAIL) + dhdsdio_htclk(bus, FALSE, FALSE); + /* Now remove the SD clock */ + dhdsdio_sdclk(bus, FALSE); + dhd_os_wd_timer(bus->dhd, 0); + break; + } +#ifdef DHD_DEBUG + DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate)); +#endif /* DHD_DEBUG */ + + return BCME_OK; +} + +int +dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n", + (sleep ? "SLEEP" : "WAKE"), + (bus->sleeping ? "SLEEP" : "WAKE"))); + + /* Done if we're already in the requested state */ + if (sleep == bus->sleeping) + return BCME_OK; + + /* Going to sleep: set the alarm and turn off the lights... */ + if (sleep) { + /* Don't sleep if something is pending */ + if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq)) + return BCME_BUSY; + + + /* Disable SDIO interrupts (no longer interested) */ + bcmsdh_intr_disable(bus->sdh); + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); + + /* Isolate the bus */ + if (bus->sih->chip != BCM4329_CHIP_ID && bus->sih->chip != BCM4319_CHIP_ID) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); + } + + /* Change state */ + bus->sleeping = TRUE; + + } else { + /* Waking up: bus power up is ok, set local state */ + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + 0, NULL); + + /* Force pad isolation off if possible (in case power never toggled) */ + if ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev >= 10)) + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL); + + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Change state */ + bus->sleeping = FALSE; + + /* Enable interrupts again */ + if (bus->intr && 
(bus->dhd->busstate == DHD_BUS_DATA)) { + bus->intdis = FALSE; + bcmsdh_intr_enable(bus->sdh); + } + } + + return BCME_OK; +} +#if defined(OOB_INTR_ONLY) +void +dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) +{ +#if defined(HW_OOB) + bcmsdh_enable_hw_oob_intr(bus->sdh, enable); +#else + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (enable == TRUE) { + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + } else { + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + } + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); +#endif /* !defined(HW_OOB) */ +} +#endif /* defined(OOB_INTR_ONLY) */ + +#define BUS_WAKE(bus) \ + do { \ + if ((bus)->sleeping) \ + dhdsdio_bussleep((bus), FALSE); \ + } while (0); + + +/* Writes a HW/SW header into the packet and sends it. */ +/* Assumes: (a) header space already there, (b) caller holds lock */ +static int +dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt) +{ + int ret; + osl_t *osh; + uint8 *frame; + uint16 len, pad = 0; + uint32 swheader; + uint retries = 0; + bcmsdh_info_t *sdh; + void *new; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + sdh = bus->sdh; + osh = bus->dhd->osh; + + if (bus->dhd->dongle_reset) { + ret = BCME_NOTREADY; + goto done; + } + + frame = (uint8*)PKTDATA(osh, pkt); + + /* Add alignment padding, allocate new packet if needed */ + if ((pad = ((uintptr)frame % DHD_SDALIGN))) { + if (PKTHEADROOM(osh, pkt) < pad) { + DHD_INFO(("%s: insufficient headroom %d for %d pad\n", + __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad)); + bus->dhd->tx_realloc++; + new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE); + if (!new) { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN)); + ret = BCME_NOMEM; + goto done; + } + + PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN); + bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt)); + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + /* free the pkt if canned one is not used */ + free_pkt = TRUE; + pkt = new; + frame = (uint8*)PKTDATA(osh, pkt); + ASSERT(((uintptr)frame % DHD_SDALIGN) == 0); + pad = 0; + } else { + PKTPUSH(osh, pkt, pad); + frame = (uint8*)PKTDATA(osh, pkt); + + ASSERT((pad + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt)); + bzero(frame, pad + SDPCM_HDRLEN); + } + } + ASSERT(pad < DHD_SDALIGN); + + /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ + len = (uint16)PKTLEN(osh, pkt); + *(uint16*)frame = htol16(len); + *(((uint16*)frame) + 1) = htol16(~len); + + /* Software tag: channel, sequence number, data offset */ + swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq | + (((pad + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + +#ifdef DHD_DEBUG + tx_packets[PKTPRIO(pkt)]++; + if (DHD_BYTES_ON() && + (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) || + (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif + + /* Raise len to next SDIO 
block to eliminate tail command */ + if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { + uint16 pad = bus->blocksize - (len % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize)) +#ifdef NOTUSED + if (pad <= PKTTAILROOM(osh, pkt)) +#endif /* NOTUSED */ + len += pad; + } else if (len % DHD_SDALIGN) { + len += DHD_SDALIGN - (len % DHD_SDALIGN); + } + + /* Some controllers have trouble with odd bytes -- round to even */ + if (forcealign && (len & (ALIGNMENT - 1))) { +#ifdef NOTUSED + if (PKTTAILROOM(osh, pkt)) +#endif + len = ROUNDUP(len, ALIGNMENT); +#ifdef NOTUSED + else + DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len)); +#endif + } + + do { + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + frame, len, pkt, NULL, NULL); + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + } while ((ret < 0) && retrydata && retries++ < TXRETRIES); + +done: + /* restore pkt buffer pointer before calling tx complete routine */ + PKTPULL(osh, pkt, SDPCM_HDRLEN + pad); + dhd_os_sdunlock(bus->dhd); + dhd_txcomplete(bus->dhd, pkt, ret != 0); + dhd_os_sdlock(bus->dhd); + + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + + return ret; +} + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pkt) +{ + int ret = BCME_ERROR; + osl_t *osh; + uint datalen, prec; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + osh = bus->dhd->osh; + datalen = PKTLEN(osh, pkt); + +#ifdef SDTEST + /* Push the test header if doing loopback */ + if (bus->ext_loop) { + uint8* data; + PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN); + data = PKTDATA(osh, pkt); + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->loopid++; + *data++ = (datalen >> 0); + *data++ = (datalen >> 8); + datalen += SDPCM_TEST_HDRLEN; + } +#endif /* SDTEST */ + + /* Add space for the header */ + PKTPUSH(osh, pkt, SDPCM_HDRLEN); + ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); + + prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); + + + /* Check for existing queue, current flow-control, pending event, or pending clock */ + if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched || + (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) || + (bus->clkstate != CLK_AVAIL)) { + DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, + pktq_len(&bus->txq))); + bus->fcqueued++; + + /* Priority based enq */ + dhd_os_sdlock_txq(bus->dhd); + if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) { + PKTPULL(osh, pkt, SDPCM_HDRLEN); + dhd_txcomplete(bus->dhd, pkt, FALSE); + PKTFREE(osh, pkt, TRUE); + DHD_ERROR(("%s: out of bus->txq !!!\n", __FUNCTION__)); + ret = BCME_NORESOURCE; + } else { + ret = BCME_OK; + } + dhd_os_sdunlock_txq(bus->dhd); + + if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow) + dhd_txflowcontrol(bus->dhd, 0, ON); + +#ifdef DHD_DEBUG + if (pktq_plen(&bus->txq, prec) > qcount[prec]) + qcount[prec] = 
pktq_plen(&bus->txq, prec); +#endif + /* Schedule DPC if needed to send queued packet(s) */ + if (dhd_deferred_tx && !bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } else { + /* Lock: we're about to use shared data/code (and SDIO) */ + dhd_os_sdlock(bus->dhd); + + /* Otherwise, send it now */ + BUS_WAKE(bus); + /* Make sure back plane ht clk is on, no pending allowed */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + +#ifndef SDTEST + DHD_TRACE(("%s: calling txpkt\n", __FUNCTION__)); + ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE); +#else + ret = dhdsdio_txpkt(bus, pkt, + (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE); +#endif + if (ret) + bus->dhd->tx_errors++; + else + bus->dhd->dstats.tx_bytes += datalen; + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + } + + + return ret; +} + +static uint +dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) +{ + void *pkt; + uint32 intstatus = 0; + uint retries = 0; + int ret = 0, prec_out; + uint cnt = 0; + uint datalen; + uint8 tx_prec_map; + + dhd_pub_t *dhd = bus->dhd; + sdpcmd_regs_t *regs = bus->regs; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + tx_prec_map = ~bus->flowcontrol; + + /* Send frames until the limit or some other event */ + for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) { + dhd_os_sdlock_txq(bus->dhd); + if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) { + dhd_os_sdunlock_txq(bus->dhd); + break; + } + dhd_os_sdunlock_txq(bus->dhd); + datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN; + +#ifndef SDTEST + ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE); +#else + ret = dhdsdio_txpkt(bus, pkt, + (bus->ext_loop ? 
SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE); +#endif + if (ret) + bus->dhd->tx_errors++; + else + bus->dhd->dstats.tx_bytes += datalen; + + /* In poll mode, need to check for other events */ + if (!bus->intr && cnt) + { + /* Check device status, signal pending interrupt */ + R_SDREG(intstatus, ®s->intstatus, retries); + bus->f2txdata++; + if (bcmsdh_regfail(bus->sdh)) + break; + if (intstatus & bus->hostintmask) + bus->ipend = TRUE; + } + } + + /* Deflow-control stack if needed */ + if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) && + dhd->txoff && (pktq_len(&bus->txq) < FCLOW)) + dhd_txflowcontrol(dhd, 0, OFF); + + return cnt; +} + +int +dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + uint8 *frame; + uint16 len; + uint32 swheader; + uint retries = 0; + bcmsdh_info_t *sdh = bus->sdh; + uint8 doff = 0; + int ret = -1; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Back the pointer to make a room for bus header */ + frame = msg - SDPCM_HDRLEN; + len = (msglen += SDPCM_HDRLEN); + + /* Add alignment padding (optional for ctl frames) */ + if (dhd_alignctl) { + if ((doff = ((uintptr)frame % DHD_SDALIGN))) { + frame -= doff; + len += doff; + msglen += doff; + bzero(frame, doff + SDPCM_HDRLEN); + } + ASSERT(doff < DHD_SDALIGN); + } + doff += SDPCM_HDRLEN; + + /* Round send length to next SDIO block */ + if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { + uint16 pad = bus->blocksize - (len % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize)) + len += pad; + } else if (len % DHD_SDALIGN) { + len += DHD_SDALIGN - (len % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (len & (ALIGNMENT - 1))) + len = ROUNDUP(len, ALIGNMENT); + + ASSERT(ISALIGNED((uintptr)frame, 2)); + + + /* Need to lock here to protect txseq and SDIO tx calls */ + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ + *(uint16*)frame = htol16((uint16)msglen); + *(((uint16*)frame) + 1) = htol16(~msglen); + + /* Software tag: channel, sequence number, data offset */ + swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + + if (!DATAOK(bus)) { + DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq)); + bus->ctrl_frame_stat = TRUE; + /* Send from dpc */ + bus->ctrl_frame_buf = frame; + bus->ctrl_frame_len = len; + + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + + if (bus->ctrl_frame_stat == FALSE) { + DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); + ret = 0; + } else { + DHD_INFO(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__)); + ret = -1; + } + } + + if (ret == -1) { +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif + + do { + bus->ctrl_frame_stat = FALSE; + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + frame, len, NULL, NULL, NULL); + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, 
abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + } while ((ret < 0) && retries++ < TXRETRIES); + } + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (ret) + bus->dhd->tx_ctlerrs++; + else + bus->dhd->tx_ctlpkts++; + + return ret ? -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + bool pending; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else if (timeleft == 0) { + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#ifdef DHD_DEBUG_TRAP + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); +#endif /* DHD_DEBUG_TRAP */ + } else if (pending == TRUE) { + DHD_CTL(("%s: cancelled\n", __FUNCTION__)); + return -ERESTARTSYS; + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); +#ifdef DHD_DEBUG_TRAP + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); +#endif /* DHD_DEBUG_TRAP */ + } + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + return rxlen ? 
(int)rxlen : -ETIMEDOUT; +} + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_POLLRATE, + IOV_SDREG, + IOV_SBREG, + IOV_SDCIS, + IOV_MEMBYTES, + IOV_MEMSIZE, +#ifdef DHD_DEBUG_TRAP + IOV_CHECKDIED, +#endif + IOV_DOWNLOAD, + IOV_FORCEEVEN, + IOV_SDIOD_DRIVE, + IOV_READAHEAD, + IOV_SDRXCHAIN, + IOV_ALIGNCTL, + IOV_SDALIGN, + IOV_DEVRESET, + IOV_CPU, +#ifdef SDTEST + IOV_PKTGEN, + IOV_EXTLOOP, +#endif /* SDTEST */ + IOV_SPROM, + IOV_TXBOUND, + IOV_RXBOUND, + IOV_TXMINMAX, + IOV_IDLETIME, + IOV_IDLECLOCK, + IOV_SD1IDLE, + IOV_SLEEP, + IOV_VARS +}; + +const bcm_iovar_t dhdsdio_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 }, + {"download", IOV_DOWNLOAD, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 }, + {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 }, +#endif /* DHD_DEBUG */ +#ifdef DHD_DEBUG_TRAP + {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 }, +#endif /* DHD_DEBUG_TRAP */ +#ifdef SDTEST + {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 }, + {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, +#endif /* SDTEST */ + + {NULL, 0, 0, 0, 0 } +}; + +static void +dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div) +{ + uint q1, q2; + + if (!div) { + bcm_bprintf(strbuf, "%s N/A", desc); + } else { + q1 = num / div; + q2 = (100 * (num - (q1 * div))) / div; + bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2); + } +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus = dhdp->bus; + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n", + bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); + bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + 
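+	/*
+	 * Illustrative note, not part of the original patch: dhd_dump_pct()
+	 * above prints the ratio num/div with two decimal places using only
+	 * integer arithmetic, e.g. for num = 1, div = 3:
+	 *
+	 *	q1 = 1 / 3 = 0;
+	 *	q2 = (100 * (1 - 0 * 3)) / 3 = 33;	-> "0.33"
+	 *
+	 * A minimal stand-alone sketch of the same idea (hypothetical helper,
+	 * not used by the driver):
+	 *
+	 *	static void pct_sketch(uint num, uint div)
+	 *	{
+	 *		uint q1 = num / div;
+	 *		uint q2 = (100 * (num - q1 * div)) / div;
+	 *		printf("%u.%02u\n", q1, q2);
+	 *	}
+	 */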
bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? 
" " : " not ")); + bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0; +} + +#ifdef SDTEST +static int +dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + + pktgen.version = DHD_PKTGEN_VERSION; + pktgen.freq = bus->pktgen_freq; + pktgen.count = bus->pktgen_count; + pktgen.print = bus->pktgen_print; + pktgen.total = bus->pktgen_total; + pktgen.minlen = bus->pktgen_minlen; + pktgen.maxlen = bus->pktgen_maxlen; + pktgen.numsent = bus->pktgen_sent; + pktgen.numrcvd = bus->pktgen_rcvd; + pktgen.numfail = bus->pktgen_fail; + pktgen.mode = bus->pktgen_mode; + pktgen.stop = bus->pktgen_stop; + + bcopy(&pktgen, arg, sizeof(pktgen)); + + return 0; +} + +static int +dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + uint oldcnt, oldmode; + + bcopy(arg, &pktgen, sizeof(pktgen)); + if (pktgen.version != DHD_PKTGEN_VERSION) + return BCME_BADARG; + + oldcnt = bus->pktgen_count; + oldmode = bus->pktgen_mode; + + bus->pktgen_freq = pktgen.freq; + bus->pktgen_count = pktgen.count; + bus->pktgen_print = pktgen.print; + bus->pktgen_total = pktgen.total; + bus->pktgen_minlen = pktgen.minlen; + bus->pktgen_maxlen = pktgen.maxlen; + bus->pktgen_mode = pktgen.mode; + bus->pktgen_stop = pktgen.stop; + + bus->pktgen_tick = bus->pktgen_ptick = 0; + bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen); + bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen); + + /* Clear counts for a new pktgen (mode change, or was stopped) */ + if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) + bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0; + + return 0; +} +#endif /* SDTEST */ + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? 
"write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +#ifdef DHD_DEBUG_TRAP +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv; + + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. + * NVRAM length at the end of memory should have been overwritten. + */ + if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) { + DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr)); + return BCME_ERROR; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0) + return rv; + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + +static int +dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + sdpcm_shared_t sdpcm_shared; + struct bcmstrbuf strbuf; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr); + + if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line); + } + + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.trap_addr, + (uint8*)&tr, sizeof(trap_t))) < 0) + goto done; + + bcm_bprintf(&strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n", + tr.type, tr.epc, tr.cpsr, tr.spsr, tr.r13, tr.r14, tr.pc, + sdpcm_shared.trap_addr, + tr.r0, tr.r1, tr.r2, tr.r3, tr.r4, tr.r5, tr.r6, tr.r7); + } + } + + if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + /* Mem dump to a file on device */ + dhdsdio_mem_dump(bus); + } + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + + return bcmerror; +} + +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = 0; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + + /* Get full mem size */ + size = bus->ramsize; + buf = MALLOC(bus->dhd->osh, size); + if (!buf) { + printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); + return -1; + } + + /* Read mem content */ + printf("Dump dongle memory"); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) + { + printf("%s: Error membytes %d\n", __FUNCTION__, ret); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return -1; + } + printf("."); + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + printf("Done\n"); + +#ifdef DHD_DEBUG + /* free buf before return !!! 
*/ + if (write_to_file(bus->dhd, buf, bus->ramsize)) + { + printf("%s: Error writing to files\n", __FUNCTION__); + return -1; + } + /* buf free handled in write_to_file, not here */ +#else + MFREE(bus->dhd->osh, buf, size); +#endif + return 0; +} +#endif /* DHD_DEBUG_TRAP */ + +#ifdef DHD_DEBUG +#define CONSOLE_LINE_MAX 192 + +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); + } + } +break2: + + return BCME_OK; +} +#endif /* DHD_DEBUG */ + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + + + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? "write" : "read"), size, address)); + + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + bcmerror = BCME_BADARG; + break; + } + + /* Generate the actual data pointer */ + data = set ? 
(uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdsdio_membytes(bus, set, address, data, size); + + break; + } + + case IOV_GVAL(IOV_MEMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_DOWNLOAD): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; + case IOV_GVAL(IOV_ALIGNCTL): + int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uintptr)bus->regs + sd_ptr->offset; + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uintptr)bus->regs + sd_ptr->offset; + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* Same as above, but offset is not backplane (not SDIO core) */ + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + bcmstrcat(arg, "\nFunc 0\n"); + bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + 
int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + + + +#endif /* DHD_DEBUG */ + + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + + + case IOV_SVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n", + __FUNCTION__, bool_val, bus->dhd->dongle_reset, + bus->dhd->busstate)); + + ASSERT(bus->dhd->osh); + /* ASSERT(bus->cl_devid); */ + + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + + break; + + case IOV_GVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__)); + + /* Get its status */ + int_val = (bool) bus->dhd->dongle_reset; + bcopy(&int_val, arg, val_size); + + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE) + dhd_preinit_ioctls((dhd_pub_t *) bus->dhd); + + return bcmerror; +} + +static int +dhdsdio_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + char *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + if (bus->vars) { + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (char*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. 
*/ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + bus->orig_ramsize, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((bus->orig_ramsize - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). + */ + if (enter) { + + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4); + } + } else { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: no vars written to RAM\n", __FUNCTION__)); + bcmerror = 0; + } + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + 
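+	/*
+	 * Illustrative note, not part of the original patch: before the ARM
+	 * core is reset below, dhdsdio_write_vars() has left a length token
+	 * in the last word of RAM: the vars size in words in the low 16 bits
+	 * and its one's complement in the high 16 bits.  For example,
+	 * varsize = 0x200 bytes gives:
+	 *
+	 *	varsizew = 0x200 / 4 = 0x80;
+	 *	token    = (~0x80 << 16) | 0x80 = 0xff7f0080;
+	 *
+	 * Once the firmware boots it overwrites that word with the address of
+	 * sdpcm_shared; dhdsdio_readshared() uses the complement property
+	 * (((~addr >> 16) & 0xffff) == (addr & 0xffff)) to detect a last word
+	 * that still holds the token and reject it as an invalid address.
+	 */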
si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + osl_t *osh = bus->dhd->osh; + uint32 local_hostintmask; + uint8 saveclk; + uint retries; + int err; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Enable clock for device interrupts */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Disable and clear interrupts at the chip level also */ + W_SDREG(0, &bus->regs->hostintmask, retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Change our idea of bus state */ + bus->dhd->busstate = DHD_BUS_DOWN; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + } + + /* Turn off the bus (F2), free any pending packets */ + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + W_SDREG(local_hostintmask, &bus->regs->intstatus, retries); + + /* Turn off the backplane clock (only) */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Clear the data packet queues */ + pktq_flush(osh, &bus->txq, TRUE); + + /* Clear any held glomming stuff */ + if (bus->glomd) + PKTFREE(osh, bus->glomd, FALSE); + + if (bus->glom) + PKTFREE(osh, bus->glom, FALSE); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + dhd_os_ioctl_resp_wake(bus->dhd); + + /* Reset some F2 state stuff */ + bus->rxskip = FALSE; + bus->tx_seq = bus->rx_seq = 0; + + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); +} + +int +dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + dhd_timeout_t tmo; + uint retries = 0; + uint8 ready, enable; + int err, ret = 0; + uint8 saveclk; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (bus->clkstate != CLK_AVAIL) + goto exit; + + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + goto exit; + } + + /* Enable function 2 (frame transfers) */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); + + ready = 0; + while (ready != enable && !dhd_timeout_expired(&tmo)) + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + + + 
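+	/*
+	 * Illustrative note, not part of the original patch: "enable" and
+	 * "ready" are CCCR bitmasks with one bit per SDIO function, so the
+	 * loop above spins until the dongle reports every enabled function
+	 * ready or the DHD_WAIT_F2RDY timeout expires.  Assuming the usual
+	 * CCCR IOEx/IORx bit layout (bit 1 = F1, bit 2 = F2):
+	 *
+	 *	enable = SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2 = 0x06;
+	 *	ready == 0x02  ->  F1 up, still waiting for F2;
+	 *	ready == 0x06  ->  both functions up, proceed.
+	 */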
DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + + } + + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + /* Restore previous clock setting */ + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? 
", send NAK" : ""))); + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL); + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, ®s->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) + bus->dhd->busstate = DHD_BUS_DOWN; +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. 
*/ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext, *save_pfirst; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and and copy into the chain. 
+ */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif + + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_seq + 2; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all the subframe headers */ + for (num = 0, pnext = 
pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + save_pfirst = pfirst; + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + if (plast) { + PKTSETNEXT(osh, plast, pnext); + } else { + ASSERT(save_pfirst == pfirst); + save_pfirst = pnext; + } + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + if (plast) { + PKTSETNEXT(osh, plast, pnext); + } else { + ASSERT(save_pfirst == pfirst); + save_pfirst = pnext; + } + continue; + } + + /* this packet will go up, link back into chain and count it */ + PKTSETNEXT(osh, pfirst, pnext); + plast = pfirst; + num++; + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), 
PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + if (num) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num); + dhd_os_sdlock(bus->dhd); + } + + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } + else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required becuase dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. 
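+ * If PKTGET fails below, the gSPI path reads the whole frame into bus->rxbuf (via bus->rxctl) instead of a pool packet.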
+ */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attemp NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_seq + 2; + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* 
Force retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_seq + 2; + } 
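+ /* Accept the (possibly clamped) transmit window as the new tx limit */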
+ bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if (chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? (len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? 
"data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + + + /* Unlock during rx call */ + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, pkt, 1); + dhd_os_sdlock(bus->dhd); + } + rxcount = maxframes - rxleft; +#ifdef DHD_DEBUG + /* Message if we hit the limit */ + if (!rxleft && !sdtest) + DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes)); + else +#endif /* DHD_DEBUG */ + DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount)); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rxseq--; + bus->rx_seq = rxseq; + + return rxcount; +} + +static uint32 +dhdsdio_hostmail(dhd_bus_t *bus) +{ + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus = 0; + uint32 hmb_data; + uint8 fcbits; + uint retries = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Read mailbox data and ack that we did so */ + R_SDREG(hmb_data, ®s->tohostmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_INT_ACK, ®s->tosbmailbox, retries); + bus->f1regdata += 2; + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq)); + if (!bus->rxskip) { + DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__)); + } + bus->rxskip = FALSE; + intstatus |= I_HMB_FRAME_IND; + } + + /* + * DEVREADY does not occur with gSPI. 
+ */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION)); + else + DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver)); + } + + /* + * Flow Control has been moved into the RX headers and this out of band + * method isn't used any more. Leae this here for possibly remaining backward + * compatible with older dongles + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->fc_xoff++; + if (bus->flowcontrol & ~fcbits) + bus->fc_xon++; + + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FCDATA_MASK | + HMB_DATA_VERSION_MASK)) { + DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); + } + + return intstatus; +} + +bool +dhdsdio_dpc(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus, newstatus = 0; + uint retries = 0; + uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */ + uint txlimit = dhd_txbound; /* Tx frames to send before resched */ + uint framecnt = 0; /* Temporary counter of tx/rx frames */ + bool rxdone = TRUE; /* Flag for no more read data */ + bool resched = FALSE; /* Flag indicating resched wanted */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Start with leftover status bits */ + intstatus = bus->intstatus; + + dhd_os_sdlock(bus->dhd); + + /* If waiting for HTAVAIL, check status */ + if (bus->clkstate == CLK_PENDING) { + int err; + uint8 clkctl, devctl = 0; + +#ifdef DHD_DEBUG + /* Check for inconsistent device control */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } else { + ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); + } +#endif /* DHD_DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl)); + + if (SBSDIO_HTAV(clkctl)) { + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + if (err) { + DHD_ERROR(("%s: error writing DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + bus->clkstate = CLK_AVAIL; + } else { + goto clkwait; + } + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + if (bus->clkstate == CLK_PENDING) + goto clkwait; + + /* Pending interrupt indicates new device status */ + if (bus->ipend) { + bus->ipend = FALSE; + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata++; + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + newstatus &= bus->hostintmask; + bus->fcstate = !!(newstatus & I_HMB_FC_STATE); + if (newstatus) { + 
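+ /* Write the latched status bits back to intstatus to ack them */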
W_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata++; + } + } + + /* Merge new bits with previous */ + intstatus |= newstatus; + bus->intstatus = 0; + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + W_SDREG(I_HMB_FC_CHANGE, ®s->intstatus, retries); + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata += 2; + bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus); + } + + /* Generally don't ask for these, can get CRC errors... */ + if (intstatus & I_WR_OOSYNC) { + DHD_ERROR(("Dongle reports WR_OOSYNC\n")); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + DHD_ERROR(("Dongle reports RD_OOSYNC\n")); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + DHD_ERROR(("Dongle reports SBINT\n")); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + DHD_INFO(("Dongle reports CHIPACTIVE\n")); + intstatus &= ~I_CHIPACTIVE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) + intstatus &= ~I_HMB_FRAME_IND; + + /* On frame indication, read available frames */ + if (PKT_AVAILABLE()) { + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~I_HMB_FRAME_IND; + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) 
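+ * bus->intdis was set in dhdsdio_isr() when interrupts were masked, so they are re-armed here at most once per interrupt.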
+ */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + + if (DATAOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + int ret, i; + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, + NULL, NULL, NULL); + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + + printf("Return_dpc value is : %d\n", ret); + bus->ctrl_frame_stat = FALSE; + dhd_wait_event_wakeup(bus->dhd); + } + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; + } + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation %d \n", + __FUNCTION__, bcmsdh_regfail(sdh))); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + } else if (bus->clkstate == CLK_PENDING) { + DHD_INFO(("%s: rescheduled due to CLK_PENDING awaiting \ + I_CHIPACTIVE interrupt", __FUNCTION__)); + resched = TRUE; + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE()) { /* Read multiple frames */ + resched = TRUE; + } + + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. */ + if ((bus->clkstate != CLK_PENDING) && bus->idletime == DHD_IDLE_IMMEDIATE) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. */ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? 
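+ * If it happens anyway, just log the condition and return without scheduling the DPC.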
*/ + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } + + /* Disable additional interrupts (is this needed now)? */ + if (bus->intr) { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + } else { + DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); + } + + bcmsdh_intr_disable(sdh); + bus->intdis = TRUE; + +#if defined(SDIO_ISR_THREAD) + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + dhd_os_wake_lock(bus->dhd); + while (dhdsdio_dpc(bus)); + dhd_os_wake_unlock(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); +#endif + +} + +#ifdef SDTEST +static void +dhdsdio_pktgen_init(dhd_bus_t *bus) +{ + /* Default to specified length, or full range */ + if (dhd_pktgen_len) { + bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN); + bus->pktgen_minlen = bus->pktgen_maxlen; + } else { + bus->pktgen_maxlen = MAX_PKTGEN_LEN; + bus->pktgen_minlen = 0; + } + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Default to per-watchdog burst with 10s print time */ + bus->pktgen_freq = 1; + bus->pktgen_print = 10000 / dhd_watchdog_ms; + bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000; + + /* Default to echo mode */ + bus->pktgen_mode = DHD_PKTGEN_ECHO; + bus->pktgen_stop = 1; +} + +static void +dhdsdio_pktgen(dhd_bus_t *bus) +{ + void *pkt; + uint8 *data; + uint pktcount; + uint fillbyte; + osl_t *osh = bus->dhd->osh; + uint16 len; + + /* Display current count if appropriate */ + if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) { + bus->pktgen_ptick = 0; + printf("%s: send attempts %d rcvd %d\n", + __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd); + } + + /* For recv mode, just make sure dongle has started sending */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (!bus->pktgen_rcvd) + dhdsdio_sdtest_set(bus, TRUE); + return; + } + + /* Otherwise, generate or request the specified number of packets */ + for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) { + /* Stop if total has been reached */ + if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) { + bus->pktgen_count = 0; + break; + } + + /* Allocate an appropriate-sized packet */ + len = bus->pktgen_len; + if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN), + TRUE))) {; + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + break; + } + PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Write test header cmd and extra based on mode */ + switch (bus->pktgen_mode) { + case DHD_PKTGEN_ECHO: + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_SEND: + *data++ = SDPCM_TEST_DISCARD; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_RXBURST: + *data++ = SDPCM_TEST_BURST; + *data++ = (uint8)bus->pktgen_count; + break; + + default: + DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode)); + PKTFREE(osh, pkt, TRUE); + bus->pktgen_count = 0; + return; + } + + /* Write test header length field */ + *data++ = (len >> 0); + *data++ = (len >> 8); + + /* Then fill in the remainder -- N/A for burst, but who cares... 
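+ * The fill byte is derived from the offset and the send count, so dhdsdio_testrcv() can verify echoed data.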
*/ + for (fillbyte = 0; fillbyte < len; fillbyte++) + *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN); + } +#endif + + /* Send it */ + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) { + bus->pktgen_fail++; + if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail) + bus->pktgen_count = 0; + } + bus->pktgen_sent++; + + /* Bump length if not fixed, wrap at max */ + if (++bus->pktgen_len > bus->pktgen_maxlen) + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Special case for burst mode: just send one request! */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) + break; + } +} + +static void +dhdsdio_sdtest_set(dhd_bus_t *bus, bool start) +{ + void *pkt; + uint8 *data; + osl_t *osh = bus->dhd->osh; + + /* Allocate the packet */ + if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) { + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + return; + } + PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Fill in the test header */ + *data++ = SDPCM_TEST_SEND; + *data++ = start; + *data++ = (bus->pktgen_maxlen >> 0); + *data++ = (bus->pktgen_maxlen >> 8); + + /* Send it */ + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) + bus->pktgen_fail++; +} + + +static void +dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq) +{ + osl_t *osh = bus->dhd->osh; + uint8 *data; + uint pktlen; + + uint8 cmd; + uint8 extra; + uint16 len; + uint16 offset; + + /* Check for min length */ + if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen)); + PKTFREE(osh, pkt, FALSE); + return; + } + + /* Extract header fields */ + data = PKTDATA(osh, pkt); + cmd = *data++; + extra = *data++; + len = *data++; len += *data++ << 8; + + /* Check length for relevant commands */ + if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) { + if (pktlen != len + SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + return; + } + } + + /* Process as per command */ + switch (cmd) { + case SDPCM_TEST_ECHOREQ: + /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */ + *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP; + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) { + bus->pktgen_sent++; + } else { + bus->pktgen_fail++; + PKTFREE(osh, pkt, FALSE); + } + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_ECHORSP: + if (bus->ext_loop) { + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + } + + for (offset = 0; offset < len; offset++, data++) { + if (*data != SDPCM_TEST_FILL(offset, extra)) { + DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " + "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n", + offset, len, SDPCM_TEST_FILL(offset, extra), *data)); + break; + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_DISCARD: + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_BURST: + case SDPCM_TEST_SEND: + default: + DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + break; + } + + /* 
For recv mode, stop at limie (and tell dongle to stop sending) */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_total && (bus->pktgen_rcvd >= bus->pktgen_total)) { + bus->pktgen_count = 0; + dhdsdio_sdtest_set(bus, FALSE); + } + } +} +#endif /* SDTEST */ + +extern bool +dhd_bus_watchdog(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + + DHD_TIMER(("%s: Enter\n", __FUNCTION__)); + + bus = dhdp->bus; + + if (bus->dhd->dongle_reset) + return FALSE; + + /* Ignore the timer if simulating bus down */ + if (bus->sleeping) + return FALSE; + + dhd_os_sdlock(bus->dhd); + + /* Poll period: check device if appropriate. */ + if (bus->poll && (++bus->polltick >= bus->pollrate)) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { + + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif + + /* On idle timeout clear activity flag and/or turn off clock */ + if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } + + dhd_os_sdunlock(bus->dhd); + + return bus->ipend; +} + +#ifdef DHD_DEBUG +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = 
bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Bump dongle by sending an empty event pkt. + * sdpcm_sendup (RX) checks for virtual console input. + */ + if (((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) && + bus->clkstate == CLK_AVAIL) + dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE); + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return rv; +} +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4325_CHIP_ID) + return TRUE; + if (chipid == BCM4329_CHIP_ID) + return TRUE; + if (chipid == BCM4315_CHIP_ID) + return TRUE; + if (chipid == BCM4319_CHIP_ID) + return TRUE; + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; + dhd_alignctl = TRUE; + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + dhd_doflow = FALSE; + dhd_dongle_memsize = 0; + dhd_txminmax = DHD_TXMINMAX; + + forcealign = TRUE; + + + dhd_common_init(); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == SI_ENUM_BASE); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. 
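+ * A vendor or device ID of zero is therefore accepted here and the chip is identified later from the backplane (dhdsdio_chipmatch).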
+ */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + return NULL; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { + case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */ + case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */ + case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */ + DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__)); + break; + case BCM4329_D11NDUAL_ID: /* 4329 802.11n dualband device */ + case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */ + case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */ + case 0x4329: + DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__)); + break; + case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */ + case BCM4315_D11G_ID: /* 4315 802.11g id */ + case BCM4315_D11A_ID: /* 4315 802.11a id */ + DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__)); + break; + case BCM4319_D11N_ID: /* 4319 802.11n id */ + case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */ + case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */ + DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__)); + break; + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + return NULL; + } + + if (osh == NULL) { + /* Ask the OS interface part for an OSL handle */ + if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) { + DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__)); + return NULL; + } + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + /* Register interrupt callback, but mask it (not operational yet). 
*/ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + + + /* if firmware path present try to download and bring up bus */ + if ((ret = dhd_bus_start(bus->dhd)) != 0) { +#if 1 + DHD_ERROR(("%s: failed\n", __FUNCTION__)); + goto fail; +#else + if (ret == BCME_NOTUP) { + DHD_ERROR(("%s: dongle is not responding\n", __FUNCTION__)); + goto fail; + } +#endif + } + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_net_attach(bus->dhd, 0) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + + return bus; + +fail: + dhdsdio_release(bus, osh); + return NULL; +} + + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ + uint8 clkctl = 0; + int err = 0; + + bus->alp_only = TRUE; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#ifdef DHD_DEBUG + printf("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)); + + +#endif /* DHD_DEBUG */ + + + /* Force PLL off until si_attach() programs PLL control regs */ + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + + +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int err = 0; + + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(65); + + for (fn = 0; fn <= numfn; fn++) { + if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); + break; + } + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err)); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + break; + } + dhd_dump_cis(fn, cis[fn]); + } + + while (fn-- > 0) { + ASSERT(cis[fn]); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + } + + if (err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + } +#endif /* DHD_DEBUG */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + 
__FUNCTION__, bus->sih->chip)); + goto fail; + } + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + + + /* Get info on the ARM and SOCRAM cores... */ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_memsize) + dhd_dongle_setmemsize(bus, dhd_dongle_memsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n", + bus->ramsize, bus->orig_ramsize)); + } + + /* ...but normally deal with the SDPCMDEV core */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); + + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + return TRUE; + +fail: + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifndef DHD_USE_STATIC_BUF + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = MALLOC(osh, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = MALLOC(osh, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) MFREE(osh, bus->rxbuf, bus->rxblen); + goto fail; + } +#else + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = dhd_os_prealloc(DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = dhd_os_prealloc(DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + goto fail; + } +#endif /* DHD_USE_STATIC_BUF */ + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + + +static bool +dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + + /* Disable F2 to clear any intermediate frame state on the dongle */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + 
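+ /* Start with receive flow-control bookkeeping cleared */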
bus->prev_rxlim_hit = 0; + + + /* Done with backplane-dependent accesses, can drop clock... */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + + return TRUE; +} + +bool +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *fw_path, char *nv_path) +{ + bool ret; + bus->fw_path = fw_path; + bus->nv_path = nv_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + return ret; +} + +static bool +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + bool ret; + + /* Download the firmware */ + dhd_os_wake_lock(bus->dhd); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + ret = _dhdsdio_download_firmware(bus) == 0; + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + dhd_os_wake_unlock(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + + dhdsdio_release_dongle(bus, osh); + + dhd_detach(bus->dhd); + bus->dhd = NULL; + } + + dhdsdio_release_malloc(bus, osh); + + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + if (osh) + dhd_osl_detach(osh); + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif + bus->rxctl = bus->rxbuf = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } +} + + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: 
Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->sih) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); +#if !defined(BCMLXSDMMC) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + si_detach(bus->sih); + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. + */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#ifdef BCMEMBEDIMAGE +static int +dhdsdio_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + + DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__)); + + /* Download image */ + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, dlarray + offset, MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + dlarray + offset, sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + unsigned char *ularray; + + ularray = MALLOC(bus->dhd->osh, bus->ramsize); + /* Upload image to verify downloaded contents. 
*/ + offset = 0; + memset(ularray, 0xaa, bus->ramsize); + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, + ularray + offset, sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + + if (memcmp(dlarray, ularray, sizeof(dlarray))) { + DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__)); + ASSERT(0); + goto err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + + MFREE(bus->dhd->osh, ularray, bus->ramsize); + } +#endif /* DHD_DEBUG */ + +err: + return bcmerror; +} +#endif /* BCMEMBEDIMAGE */ + +static int +dhdsdio_download_code_file(struct dhd_bus *bus, char *fw_path) +{ + int bcmerror = -1; + int offset = 0; + uint len; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, fw_path)); + + image = dhd_os_open_image(fw_path); + if (image == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +/* + * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL. + * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. + * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. +*/ + +static uint +process_nvram_vars(char *varbuf, uint len) +{ + char *dp; + bool findNewline; + int column; + uint buf_len, n; + + dp = varbuf; + + findNewline = FALSE; + column = 0; + + for (n = 0; n < len; n++) { + if (varbuf[n] == 0) + break; + if (varbuf[n] == '\r') + continue; + if (findNewline && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\n') { + if (column == 0) + continue; + *dp++ = 0; + column = 0; + continue; + } + *dp++ = varbuf[n]; + column++; + } + buf_len = dp - varbuf; + + while (dp < varbuf + n) + *dp++ = 0; + + return buf_len; +} + +/* + EXAMPLE: nvram_array + nvram_arry format: + name=value + Use carriage return at the end of each assignment, and an empty string with + carriage return at the end of array. + + For example: + unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"}; + Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx. + + Search "EXAMPLE: nvram_array" to see how the array is activated. 
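 An additional, purely hypothetical illustration of the transformation
 described above (these variable names and values are made up): an input
 buffer containing

     "# board parameters\r\n"
     "boardtype=0x04b9\r\n"
     "\r\n"
     "macaddr=00:90:4c:aa:bb:cc\r\n"

 is compacted by process_nvram_vars() to

     "boardtype=0x04b9\0macaddr=00:90:4c:aa:bb:cc\0"

 and the remainder of the buffer is zero-filled, so the variable block that
 is downloaded to the dongle ends in the required double NUL.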
+*/ + +void +dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params) +{ + bus->nvram_params = nvram_params; +} + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *nv_path; + bool nvram_file_exists; + + nv_path = bus->nv_path; + + nvram_file_exists = ((nv_path != NULL) && (nv_path[0] != '\0')); + if (!nvram_file_exists && (bus->nvram_params == NULL)) + return (0); + + if (nvram_file_exists) { + image = dhd_os_open_image(nv_path); + if (image == NULL) + goto err; + } + + memblock = MALLOC(bus->dhd->osh, MEMBLOCK); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MEMBLOCK)); + goto err; + } + + /* Download variables */ + if (nvram_file_exists) { + len = dhd_os_get_image_block(memblock, MEMBLOCK, image); + } + else { + len = strlen(bus->nvram_params); + ASSERT(len <= MEMBLOCK); + if (len > MEMBLOCK) + len = MEMBLOCK; + memcpy(memblock, bus->nvram_params, len); + } + + if (len > 0 && len < MEMBLOCK) { + bufp = (char *)memblock; + bufp[len] = 0; + len = process_nvram_vars(bufp, len); + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + return bcmerror; +#endif + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } + else { + embed = FALSE; + dlok = TRUE; + } + } +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdsdio_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } + else { + dlok = TRUE; + } + } +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* EXAMPLE: nvram_array */ + /* If a valid nvram_arry is specified as above, it can be passed down to dongle */ + /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + } + + /* Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void 
*handle) +{ + int status; + + /* 4329: GSPI check */ + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle); + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) +{ + return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return SDPCM_HDRLEN; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, 0, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + /* save country settinng if was pre-setup with priv ioctl */ + dhd_os_proto_block(dhdp); + dhdcdc_query_ioctl(bus->dhd, 0, WLC_GET_COUNTRY, + bus->dhd->country_code, sizeof(bus->dhd->country_code)); + dhd_os_proto_unblock(dhdp); + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_os_sdlock(dhdp); + + dhd_bus_stop(bus, FALSE); + + /* Clean tx/rx buffer pointers, detach from the dongle */ + dhdsdio_release_dongle(bus, bus->dhd->osh); + + bus->dhd->dongle_reset = TRUE; + bus->dhd->up = FALSE; + + dhd_os_sdunlock(dhdp); + + DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__)); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ + dhd_os_sdlock(dhdp); + + /* Reset SD client */ + bcmsdh_reset(bus->sdh); + + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)SI_ENUM_BASE, + bus->cl_devid)) { + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) { + + /* Re-init bus, enable F2 transfer */ + dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + +#if defined(OOB_INTR_ONLY) + dhd_enable_oob_intr(bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, 0, OFF); +#endif + + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else + bcmerror = BCME_SDIO_ERROR; + } else + bcmerror = BCME_SDIO_ERROR; + dhd_os_sdunlock(dhdp); + } else { + bcmerror = BCME_NOTDOWN; + DHD_ERROR(("%s: Set DEVRESET=FALSE invoked when device is on\n", + __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + } + } + return bcmerror; +} diff --git a/drivers/net/wireless/bcm4329/dngl_stats.h b/drivers/net/wireless/bcm4329/dngl_stats.h new file mode 100644 index 000000000000..e5db54e7edfe --- /dev/null +++ b/drivers/net/wireless/bcm4329/dngl_stats.h @@ -0,0 +1,43 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this 
software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dngl_stats.h,v 1.2.140.3 2008/05/26 16:52:08 Exp $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +#endif /* _dngl_stats_h_ */ diff --git a/drivers/net/wireless/bcm4329/hndpmu.c b/drivers/net/wireless/bcm4329/hndpmu.c new file mode 100644 index 000000000000..307347a43bde --- /dev/null +++ b/drivers/net/wireless/bcm4329/hndpmu.c @@ -0,0 +1,131 @@ +/* + * Misc utility routines for accessing PMU corerev specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: hndpmu.c,v 1.95.2.17.4.11.2.63 2010/07/21 13:55:09 Exp $ + */ + +#include <typedefs.h> +#include <bcmdefs.h> +#include <osl.h> +#include <bcmutils.h> +#include <siutils.h> +#include <bcmdevs.h> +#include <hndsoc.h> +#include <sbchipc.h> +#include <hndpmu.h> + +/* debug/trace */ +#define PMU_ERROR(args) + +#define PMU_MSG(args) + + +/* SDIO Pad drive strength to select value mappings */ +typedef struct { + uint8 strength; /* Pad Drive Strength in mA */ + uint8 sel; /* Chip-specific select value */ +} sdiod_drive_str_t; + +/* SDIO Drive Strength to sel value table for PMU Rev 1 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = { + {4, 0x2}, + {2, 0x3}, + {1, 0x0}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = { + {12, 0x7}, + {10, 0x6}, + {8, 0x5}, + {6, 0x4}, + {4, 0x2}, + {2, 0x1}, + {0, 0x0} }; + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) + +void +si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) +{ + chipcregs_t *cc; + uint origidx, intr_val = 0; + sdiod_drive_str_t *str_tab = NULL; + uint32 str_mask = 0; + uint32 str_shift = 0; + + if (!(sih->cccaps & CC_CAP_PMU)) { + return; + } + + /* Remember original core before switch to chipc */ + cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + + switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; + str_mask = 0x30000000; + str_shift = 28; + break; + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): + case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2; + str_mask = 0x00003800; + str_shift = 11; + break; + + default: + PMU_MSG(("No SDIO Drive strength init done for chip %x rev %d pmurev %d\n", + sih->chip, sih->chiprev, sih->pmurev)); + + break; + } + + if (str_tab != NULL) { + uint32 drivestrength_sel = 0; + uint32 cc_data_temp; + int i; + + for (i = 0; str_tab[i].strength != 0; i ++) { + if (drivestrength >= str_tab[i].strength) { + drivestrength_sel = str_tab[i].sel; + break; + } + } + + W_REG(osh, &cc->chipcontrol_addr, 1); + cc_data_temp = R_REG(osh, &cc->chipcontrol_data); + cc_data_temp &= ~str_mask; + drivestrength_sel <<= str_shift; + cc_data_temp |= drivestrength_sel; + W_REG(osh, &cc->chipcontrol_data, cc_data_temp); + + PMU_MSG(("SDIO: %dmA drive strength selected, set to 0x%08x\n", + drivestrength, cc_data_temp)); + } + + /* Return to original core */ + si_restore_core(sih, origidx, intr_val); +} diff --git a/drivers/net/wireless/bcm4329/include/Makefile b/drivers/net/wireless/bcm4329/include/Makefile new file mode 100644 index 000000000000..439ead14a0e6 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/Makefile @@ -0,0 +1,21 @@ +# +# include/Makefile +# +# Copyright 2005, Broadcom, Inc. +# +# $Id: Makefile,v 13.5 2005/02/17 19:11:31 Exp $ +# + +SRCBASE = .. + +TARGETS = epivers.h + + +all release: + bash epivers.sh + +clean: + rm -rf ${TARGETS} *.prev + + +.PHONY: all release clean diff --git a/drivers/net/wireless/bcm4329/include/aidmp.h b/drivers/net/wireless/bcm4329/include/aidmp.h new file mode 100644 index 000000000000..a927e5dae586 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/aidmp.h @@ -0,0 +1,368 @@ +/* + * Broadcom AMBA Interconnect definitions. 
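In si_sdiod_drive_strength_init() above, the selection loop takes the first table
entry whose strength does not exceed the requested drive strength. A minimal sketch
of that rule follows; the helper is illustrative only and not part of the patch.

    /* Hedged sketch of the lookup rule used by si_sdiod_drive_strength_init():
     * return the sel value of the first entry whose strength <= request.
     * With sdiod_drive_strength_tab2, 6 mA -> 0x4 and 5 mA -> 0x2. */
    static uint8 drivestrength_to_sel(const sdiod_drive_str_t *tab, uint32 ma)
    {
            int i;

            for (i = 0; tab[i].strength != 0; i++) {
                    if (ma >= tab[i].strength)
                            return tab[i].sel;
            }
            return 0;       /* nothing matched; fall back to sel 0 */
    }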
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: aidmp.h,v 13.2.10.1 2008/05/07 20:32:12 Exp $ + */ + + +#ifndef _AIDMP_H +#define _AIDMP_H + + +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + + +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + + +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + + +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff + + +#define CIA_MFG_MASK 0xfff00000 +#define CIA_MFG_SHIFT 20 +#define CIA_CID_MASK 0x000fff00 +#define CIA_CID_SHIFT 8 +#define CIA_CCL_MASK 0x000000f0 +#define CIA_CCL_SHIFT 4 + + +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 +#define CIB_NSW_MASK 0x00f80000 +#define CIB_NSW_SHIFT 19 +#define CIB_NMW_MASK 0x0007c000 +#define CIB_NMW_SHIFT 14 +#define CIB_NSP_MASK 0x00003e00 +#define CIB_NSP_SHIFT 9 +#define CIB_NMP_MASK 0x000001f0 +#define CIB_NMP_SHIFT 4 + + +#define MPD_MUI_MASK 0x0000ff00 +#define MPD_MUI_SHIFT 8 +#define MPD_MP_MASK 0x000000f0 +#define MPD_MP_SHIFT 4 + + +#define AD_ADDR_MASK 0xfffff000 +#define AD_SP_MASK 0x00000f00 +#define AD_SP_SHIFT 8 +#define AD_ST_MASK 0x000000c0 +#define AD_ST_SHIFT 6 +#define AD_ST_SLAVE 0x00000000 +#define AD_ST_BRIDGE 0x00000040 +#define AD_ST_SWRAP 0x00000080 +#define AD_ST_MWRAP 0x000000c0 +#define AD_SZ_MASK 0x00000030 +#define AD_SZ_SHIFT 4 +#define AD_SZ_4K 0x00000000 +#define AD_SZ_8K 0x00000010 +#define AD_SZ_16K 0x00000020 +#define AD_SZ_SZD 0x00000030 +#define AD_AG32 0x00000008 +#define AD_ADDR_ALIGN 0x00000fff +#define AD_SZ_BASE 0x00001000 + + +#define SD_SZ_MASK 0xfffff000 +#define SD_SG32 0x00000008 +#define SD_SZ_ALIGN 0x00000fff + + +#ifndef _LANGUAGE_ASSEMBLY + +typedef volatile struct _aidmp { + uint32 oobselina30; + uint32 oobselina74; + uint32 PAD[6]; + uint32 oobselinb30; + uint32 oobselinb74; + uint32 PAD[6]; + uint32 oobselinc30; + uint32 oobselinc74; + uint32 PAD[6]; + uint32 oobselind30; + uint32 oobselind74; + uint32 PAD[38]; + uint32 oobselouta30; + uint32 oobselouta74; + uint32 PAD[6]; + uint32 oobseloutb30; + uint32 oobseloutb74; + uint32 PAD[6]; + uint32 oobseloutc30; + uint32 oobseloutc74; + uint32 PAD[6]; + 
uint32 oobseloutd30; + uint32 oobseloutd74; + uint32 PAD[38]; + uint32 oobsynca; + uint32 oobseloutaen; + uint32 PAD[6]; + uint32 oobsyncb; + uint32 oobseloutben; + uint32 PAD[6]; + uint32 oobsyncc; + uint32 oobseloutcen; + uint32 PAD[6]; + uint32 oobsyncd; + uint32 oobseloutden; + uint32 PAD[38]; + uint32 oobaextwidth; + uint32 oobainwidth; + uint32 oobaoutwidth; + uint32 PAD[5]; + uint32 oobbextwidth; + uint32 oobbinwidth; + uint32 oobboutwidth; + uint32 PAD[5]; + uint32 oobcextwidth; + uint32 oobcinwidth; + uint32 oobcoutwidth; + uint32 PAD[5]; + uint32 oobdextwidth; + uint32 oobdinwidth; + uint32 oobdoutwidth; + uint32 PAD[37]; + uint32 ioctrlset; + uint32 ioctrlclear; + uint32 ioctrl; + uint32 PAD[61]; + uint32 iostatus; + uint32 PAD[127]; + uint32 ioctrlwidth; + uint32 iostatuswidth; + uint32 PAD[62]; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 PAD[60]; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 PAD[56]; + uint32 intstatus; + uint32 PAD[127]; + uint32 config; + uint32 PAD[63]; + uint32 itcr; + uint32 PAD[3]; + uint32 itipooba; + uint32 itipoobb; + uint32 itipoobc; + uint32 itipoobd; + uint32 PAD[4]; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; + uint32 PAD[4]; + uint32 itopooba; + uint32 itopoobb; + uint32 itopoobc; + uint32 itopoobd; + uint32 PAD[4]; + uint32 itopoobain; + uint32 itopoobbin; + uint32 itopoobcin; + uint32 itopoobdin; + uint32 PAD[4]; + uint32 itopreset; + uint32 PAD[15]; + uint32 peripherialid4; + uint32 peripherialid5; + uint32 peripherialid6; + uint32 peripherialid7; + uint32 peripherialid0; + uint32 peripherialid1; + uint32 peripherialid2; + uint32 peripherialid3; + uint32 componentid0; + uint32 componentid1; + uint32 componentid2; + uint32 componentid3; +} aidmp_t; + +#endif + + +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + + +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 0x160 +#define AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 
0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 +#define AI_IOCTRLSET 0x400 +#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOSTATUS 0x500 +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0xa00 +#define AI_ERRLOGDONE 0xa04 +#define AI_ERRLOGSTATUS 0xa08 +#define AI_ERRLOGADDRLO 0xa0c +#define AI_ERRLOGADDRHI 0xa10 +#define AI_ERRLOGID 0xa14 +#define AI_ERRLOGUSER 0xa18 +#define AI_ERRLOGFLAGS 0xa1c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 +#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + + +#define AIRC_RESET 1 + + +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + +#endif diff --git a/drivers/net/wireless/bcm4329/include/bcmcdc.h b/drivers/net/wireless/bcm4329/include/bcmcdc.h new file mode 100644 index 000000000000..c2a860beab24 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmcdc.h @@ -0,0 +1,100 @@ +/* + * CDC network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
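The CIA_* masks and shifts defined in aidmp.h above carve the manufacturer, core id
and core class out of an EROM component identification word. A hedged sketch; the
helper is illustrative only.

    /* Hedged sketch: decode a CIA (component identification A) word with the
     * aidmp.h masks.  A Broadcom core would report mfg == MFGID_BRCM. */
    static void decode_cia(uint32 cia, uint *mfg, uint *cid, uint *cclass)
    {
            *mfg    = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
            *cid    = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
            *cclass = (cia & CIA_CCL_MASK) >> CIA_CCL_SHIFT;
    }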
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmcdc.h,v 13.14.16.3.16.4 2009/04/12 16:58:45 Exp $ + */ +#include <proto/ethernet.h> + +typedef struct cdc_ioctl { + uint32 cmd; /* ioctl command value */ + uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */ + uint32 flags; /* flag defns given below */ + uint32 status; /* status code returned from the device */ +} cdc_ioctl_t; + +/* Max valid buffer size that can be sent to the dongle */ +#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN + +/* len field is divided into input and output buffer lengths */ +#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */ + /* excluding IOCTL header */ +#define CDCL_IOC_OUTLEN_SHIFT 0 +#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */ +#define CDCL_IOC_INLEN_SHIFT 16 + +/* CDC flag definitions */ +#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */ +#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */ +#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */ +#define CDCF_IOC_IF_SHIFT 12 +#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */ +#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */ + +#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) +#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) + +#define CDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) +#define CDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) + +/* + * BDC header + * + * The BDC header is used on data packets to convey priority across USB. 
+ */ + +#define BDC_HEADER_LEN 4 + +#define BDC_PROTO_VER 1 /* Protocol version */ + +#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ + +#define BDC_FLAG__UNUSED 0x03 /* Unassigned */ +#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ +#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */ + +#define BDC_PRIORITY_MASK 0x7 + +#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ + /* FLOW CONTROL info only */ +#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ + +#define BDC_FLAG2_IF_MASK 0x0f /* APSTA: interface on which the packet was received */ +#define BDC_FLAG2_IF_SHIFT 0 + +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +struct bdc_header { + uint8 flags; /* Flags */ + uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 flow control info for usb */ + uint8 flags2; + uint8 rssi; +}; diff --git a/drivers/net/wireless/bcm4329/include/bcmdefs.h b/drivers/net/wireless/bcm4329/include/bcmdefs.h new file mode 100644 index 000000000000..f4e99461971b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmdefs.h @@ -0,0 +1,114 @@ +/* + * Misc system wide definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
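A hedged sketch of how the CDC flag macros defined above are typically combined when
building a SET request header. The helper name is illustrative, and <typedefs.h> plus
bcmendian.h are assumed to be available.

    /* Hedged sketch: fill a cdc_ioctl_t for a SET request.  'id' is the
     * request/response pairing id, 'ifidx' the interface index. */
    static void cdc_fill_set_hdr(cdc_ioctl_t *hdr, uint32 cmd, uint16 id,
            uint ifidx, uint32 outlen)
    {
            hdr->cmd = htol32(cmd);
            /* lower 16 bits of len: expected response length, header excluded */
            hdr->len = htol32(outlen & CDCL_IOC_OUTLEN_MASK);
            hdr->flags = ((uint32)id << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET;
            CDC_SET_IF_IDX(hdr, ifidx);        /* interface index, bits 12..15 */
            hdr->flags = htol32(hdr->flags);
            hdr->status = 0;
    }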
+ * $Id: bcmdefs.h,v 13.38.4.10.2.7.6.11 2010/02/01 05:51:55 Exp $ + */ + + +#ifndef _bcmdefs_h_ +#define _bcmdefs_h_ + +#define STATIC static + +#define SI_BUS 0 +#define PCI_BUS 1 +#define PCMCIA_BUS 2 +#define SDIO_BUS 3 +#define JTAG_BUS 4 +#define USB_BUS 5 +#define SPI_BUS 6 + + +#ifdef BCMBUSTYPE +#define BUSTYPE(bus) (BCMBUSTYPE) +#else +#define BUSTYPE(bus) (bus) +#endif + + +#ifdef BCMCHIPTYPE +#define CHIPTYPE(bus) (BCMCHIPTYPE) +#else +#define CHIPTYPE(bus) (bus) +#endif + + + +#if defined(BCMSPROMBUS) +#define SPROMBUS (BCMSPROMBUS) +#elif defined(SI_PCMCIA_SROM) +#define SPROMBUS (PCMCIA_BUS) +#else +#define SPROMBUS (PCI_BUS) +#endif + + +#ifdef BCMCHIPID +#define CHIPID(chip) (BCMCHIPID) +#else +#define CHIPID(chip) (chip) +#endif + + +#define DMADDR_MASK_32 0x0 +#define DMADDR_MASK_30 0xc0000000 +#define DMADDR_MASK_0 0xffffffff + +#define DMADDRWIDTH_30 30 +#define DMADDRWIDTH_32 32 +#define DMADDRWIDTH_63 63 +#define DMADDRWIDTH_64 64 + + +#define BCMEXTRAHDROOM 164 + + +#define BCMDONGLEHDRSZ 12 +#define BCMDONGLEPADSZ 16 + +#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ) + + + +#define BITFIELD_MASK(width) \ + (((unsigned)1 << (width)) - 1) +#define GFIELD(val, field) \ + (((val) >> field ## _S) & field ## _M) +#define SFIELD(val, field, bits) \ + (((val) & (~(field ## _M << field ## _S))) | \ + ((unsigned)(bits) << field ## _S)) + + +#ifdef BCMSMALL +#undef BCMSPACE +#define bcmspace FALSE +#else +#define BCMSPACE +#define bcmspace TRUE +#endif + + +#define MAXSZ_NVRAM_VARS 4096 + +#define LOCATOR_EXTERN static + +#endif diff --git a/drivers/net/wireless/bcm4329/include/bcmdevs.h b/drivers/net/wireless/bcm4329/include/bcmdevs.h new file mode 100644 index 000000000000..14853f17795c --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmdevs.h @@ -0,0 +1,124 @@ +/* + * Broadcom device-specific manifest constants. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
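The BITFIELD_MASK/GFIELD/SFIELD macros above rely on a naming convention: for a field
FOO the caller defines FOO_M (unshifted mask) and FOO_S (shift). A minimal sketch with
a made-up field:

    /* Hedged illustration only -- SPEED is a made-up 4-bit field at bit 8. */
    #define SPEED_M         BITFIELD_MASK(4)
    #define SPEED_S         8

    static uint32 set_speed(uint32 regval, uint32 speed)
    {
            return SFIELD(regval, SPEED, speed);    /* writes bits 8..11 */
    }

    static uint32 get_speed(uint32 regval)
    {
            return GFIELD(regval, SPEED);           /* reads bits 8..11 */
    }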
+ * + * $Id: bcmdevs.h,v 13.172.4.5.4.10.2.36 2010/05/25 08:33:44 Exp $ + */ + + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + + +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_SI_IMAGE 0x1095 +#define VENDOR_TI 0x104c +#define VENDOR_RICOH 0x1180 +#define VENDOR_JMICRON 0x197b + + +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + + +#define VENDOR_BROADCOM_SDIO 0x00BF + + +#define BCM_DNGL_VID 0xa5c +#define BCM_DNGL_BL_PID_4320 0xbd11 +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4325 0xbd14 +#define BCM_DNGL_BL_PID_4315 0xbd15 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BDC_PID 0xbdc + +#define BCM4325_D11DUAL_ID 0x431b +#define BCM4325_D11G_ID 0x431c +#define BCM4325_D11A_ID 0x431d +#define BCM4329_D11NDUAL_ID 0x432e +#define BCM4329_D11N2G_ID 0x432f +#define BCM4329_D11N5G_ID 0x4330 +#define BCM4336_D11N_ID 0x4343 +#define BCM4315_D11DUAL_ID 0x4334 +#define BCM4315_D11G_ID 0x4335 +#define BCM4315_D11A_ID 0x4336 +#define BCM4319_D11N_ID 0x4337 +#define BCM4319_D11N2G_ID 0x4338 +#define BCM4319_D11N5G_ID 0x4339 + + +#define SDIOH_FPGA_ID 0x43f2 +#define SPIH_FPGA_ID 0x43f5 +#define BCM4710_DEVICE_ID 0x4710 +#define BCM27XX_SDIOH_ID 0x2702 +#define PCIXX21_FLASHMEDIA0_ID 0x8033 +#define PCIXX21_SDIOH0_ID 0x8034 +#define PCIXX21_FLASHMEDIA_ID 0x803b +#define PCIXX21_SDIOH_ID 0x803c +#define R5C822_SDIOH_ID 0x0822 +#define JMICRON_SDIOH_ID 0x2381 + + +#define BCM4306_CHIP_ID 0x4306 +#define BCM4311_CHIP_ID 0x4311 +#define BCM4312_CHIP_ID 0x4312 +#define BCM4315_CHIP_ID 0x4315 +#define BCM4318_CHIP_ID 0x4318 +#define BCM4319_CHIP_ID 0x4319 +#define BCM4320_CHIP_ID 0x4320 +#define BCM4321_CHIP_ID 0x4321 +#define BCM4322_CHIP_ID 0x4322 +#define BCM4325_CHIP_ID 0x4325 +#define BCM4328_CHIP_ID 0x4328 +#define BCM4329_CHIP_ID 0x4329 +#define BCM4336_CHIP_ID 0x4336 +#define BCM4402_CHIP_ID 0x4402 +#define BCM4704_CHIP_ID 0x4704 +#define BCM4710_CHIP_ID 0x4710 +#define BCM4712_CHIP_ID 0x4712 +#define BCM4785_CHIP_ID 0x4785 +#define BCM5350_CHIP_ID 0x5350 +#define BCM5352_CHIP_ID 0x5352 +#define BCM5354_CHIP_ID 0x5354 +#define BCM5365_CHIP_ID 0x5365 + + + +#define BCM4303_PKG_ID 2 +#define BCM4309_PKG_ID 1 +#define BCM4712LARGE_PKG_ID 0 +#define BCM4712SMALL_PKG_ID 1 +#define BCM4712MID_PKG_ID 2 +#define BCM4328USBD11G_PKG_ID 2 +#define BCM4328USBDUAL_PKG_ID 3 +#define BCM4328SDIOD11G_PKG_ID 4 +#define BCM4328SDIODUAL_PKG_ID 5 +#define BCM4329_289PIN_PKG_ID 0 +#define BCM4329_182PIN_PKG_ID 1 +#define BCM5354E_PKG_ID 1 +#define HDLSIM5350_PKG_ID 1 +#define HDLSIM_PKG_ID 14 +#define HWSIM_PKG_ID 15 + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/bcmendian.h b/drivers/net/wireless/bcm4329/include/bcmendian.h new file mode 100644 index 000000000000..ae468383aa74 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmendian.h @@ -0,0 +1,205 @@ +/* + * Byte order utilities + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you 
also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmendian.h,v 1.31.302.1.16.1 2009/02/03 18:34:31 Exp $ + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. + */ + + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include <typedefs.h> + + +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + + +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + + +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + + +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + + + + +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + +#ifndef hton16 +#ifndef IL_BIGENDIAN +#define HTON16(i) BCMSWAP16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton16(i) bcmswap16(i) +#define hton32(i) bcmswap32(i) +#define ntoh16(i) bcmswap16(i) +#define ntoh32(i) bcmswap32(i) +#define HTOL16(i) (i) +#define HTOL32(i) (i) +#define ltoh16(i) (i) +#define ltoh32(i) (i) +#define htol16(i) (i) +#define htol32(i) (i) +#else +#define HTON16(i) (i) +#define HTON32(i) (i) +#define hton16(i) (i) +#define hton32(i) (i) +#define ntoh16(i) (i) +#define ntoh32(i) (i) +#define HTOL16(i) BCMSWAP16(i) +#define HTOL32(i) BCMSWAP32(i) +#define ltoh16(i) bcmswap16(i) +#define ltoh32(i) bcmswap32(i) +#define htol16(i) bcmswap16(i) +#define htol32(i) bcmswap32(i) +#endif +#endif + +#ifndef IL_BIGENDIAN +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) +#else +#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i) +#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i) +#endif + + +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + + +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + + +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + + +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + +#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) + + 
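The *_ua_store helpers and the ltoh/ntoh unaligned accessors in this header are meant
for fields that are neither naturally aligned nor in host byte order (for example,
fields inside received frames). A hedged usage sketch; the offsets are invented.

    /* Hedged sketch: read a little-endian 16-bit field and a network-order
     * 32-bit field from an unaligned buffer, then store a 32-bit result back
     * in little-endian order.  The offsets (1, 3, 7) are arbitrary. */
    static void example_unaligned_access(uint8 *buf)
    {
            uint16 len  = ltoh16_ua(buf + 1);
            uint32 type = ntoh32_ua(buf + 3);

            htol32_ua_store((uint32)len + type, buf + 7);
    }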
+static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + + +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + + +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + + +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \ + sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)ptr) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)ptr) : \ + 0xfeedf00d) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \ + sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)ptr) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)ptr) : \ + 0xfeedf00d) + +#endif diff --git a/drivers/net/wireless/bcm4329/include/bcmpcispi.h b/drivers/net/wireless/bcm4329/include/bcmpcispi.h new file mode 100644 index 000000000000..7d98fb7cbdc8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmpcispi.h @@ -0,0 +1,205 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmpcispi.h,v 13.11.8.3 2008/07/09 21:23:29 Exp $ + */ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* ++---------------------------------------------------------------------------+ +| | +| 7 6 5 4 3 2 1 0 | +| 0x0000 SPI_CTRL SPIE SPE 0 MSTR CPOL CPHA SPR1 SPR0 | +| 0x0004 SPI_STAT SPIF WCOL ST1 ST0 WFFUL WFEMP RFFUL RFEMP | +| 0x0008 SPI_DATA Bits 31:0, data to send out on MOSI | +| 0x000C SPI_EXT ICNT1 ICNT0 BSWAP *HSMODE ESPR1 ESPR0 | +| 0x0020 GPIO_OE 0=input, 1=output PWR_OE CS_OE | +| 0x0024 GPIO_DATA CARD:1=missing, 0=present CARD PWR_DAT CS_DAT | +| 0x0040 INT_EDGE 0=level, 1=edge DEV_E SPI_E | +| 0x0044 INT_POL 1=active high, 0=active low DEV_P SPI_P | +| 0x0048 INTMASK DEV SPI | +| 0x004C INTSTATUS DEV SPI | +| 0x0060 HEXDISP Reset value: 0x14e443f5. In hexdisp mode, value | +| shows on the Raggedstone1 4-digit 7-segment display. 
| +| 0x0064 CURRENT_MA Low 16 bits indicate card current consumption in mA | +| 0x006C DISP_SEL Display mode (0=hexdisp, 1=current) DSP | +| 0x00C0 PLL_CTL bit31=ext_clk, remainder unused. | +| 0x00C4 PLL_STAT LOCK | +| 0x00C8 CLK_FREQ | +| 0x00CC CLK_CNT | +| | +| *Notes: HSMODE is not implemented, never set this bit! | +| BSWAP is available in rev >= 8 | +| | ++---------------------------------------------------------------------------+ +*/ + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */ + uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 
P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * PCI Core interrupt enable and status bit definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. 
*/ + + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ diff --git a/drivers/net/wireless/bcm4329/include/bcmperf.h b/drivers/net/wireless/bcm4329/include/bcmperf.h new file mode 100644 index 000000000000..2a78784e85d3 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmperf.h @@ -0,0 +1,36 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
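The status bits and SPI_SPIN_BOUND above support bounded busy-waiting on the FPGA SPI
host. A hedged sketch follows; rd_reg32() is a placeholder for whatever register
accessor the host driver actually uses.

    /* Hedged sketch: poll until the SPI write FIFO drains, giving up after
     * SPI_SPIN_BOUND iterations.  rd_reg32() is hypothetical. */
    static int spih_wait_wfifo_empty(spih_regs_t *regs)
    {
            uint32 spins = 0;

            while (!(rd_reg32(&regs->spih_stat) & SPIH_WFEMPTY)) {
                    if (++spins > SPI_SPIN_BOUND)
                            return -1;      /* timed out */
            }
            return 0;
    }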
+ * + * $Id: bcmperf.h,v 13.5 2007/09/14 22:00:59 Exp $ + */ +/* essai */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcm4329/include/bcmsdbus.h b/drivers/net/wireless/bcm4329/include/bcmsdbus.h new file mode 100644 index 000000000000..b7b67bc66248 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdbus.h @@ -0,0 +1,117 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdbus.h,v 13.11.14.2.6.6 2009/10/27 17:20:28 Exp $ + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ + +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + + +typedef int SDIOH_API_RC; + +/* SDio Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); + +/* attach, return handler on success, NULL if failed. + * The handler shall be provided by all subsequent calls. 
No local cache + * cfghdl points to the starting address of pci device mapped memory + */ +extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si); +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. */ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + +/* Helper function */ +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh); + + + +#endif /* _sdio_api_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh.h b/drivers/net/wireless/bcm4329/include/bcmsdh.h new file mode 100644 index 000000000000..f5dee5c58445 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdh.h @@ -0,0 +1,208 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
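The sdioh calls above form the per-host-controller API used by bcmsdh. A minimal sketch of attaching and issuing a single CMD52 read, assuming osh, cfghdl and irq come from the bus-specific probe code and that register address 0x00 (CCCR revision) is only an example:

static int example_cmd52_read(osl_t *osh, void *cfghdl, uint irq)
{
	sdioh_info_t *si;
	uint8 rev = 0;

	si = sdioh_attach(osh, cfghdl, irq);
	if (si == NULL)
		return -1;

	/* single-byte read from function 0 (CCCR) via CMD52 */
	if (!SDIOH_API_SUCCESS(sdioh_request_byte(si, SDIOH_READ, 0, 0x00, &rev)))
		printf("CMD52 read of CCCR failed\n");

	sdioh_detach(osh, si);
	return 0;
}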
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh.h,v 13.35.14.7.6.8 2009/10/14 04:22:25 Exp $ + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#define BCMSDH_ERROR(x) +#define BCMSDH_INFO(x) + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +/* Attach and build an interface to the underlying SD host driver. + * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh. + * - Returns the bcmsdh handle and virtual address base for register access. + * The returned handle should be used in all subsequent calls, but the bcmsh + * implementation may maintain a single "default" handle (e.g. the first or + * most recent one) to enable single-instance implementations to pass NULL. + */ +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq); + +/* Detach - freeup resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. */ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); + +#if defined(DHD_DEBUG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif + +#ifdef BCMLXSDMMC +extern int bcmsdh_claim_host_and_lock(void *sdh); +extern int bcmsdh_release_host_and_unlock(void *sdh); +#endif /* BCMLXSDMMC */ + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. + * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. 
>= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data); + +/* Indicate if last reg read/write failed */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle); + +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. 
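A short sketch of how a client might combine the register and buffer primitives declared above; sdh is the handle returned by bcmsdh_attach(), and core_addr/fifo_addr are placeholder backplane addresses rather than real chip offsets:

static int example_reg_and_fifo(void *sdh, uint32 core_addr, uint32 fifo_addr,
                                uint8 *buf, uint nbytes)
{
	uint32 val;

	val = bcmsdh_reg_read(sdh, core_addr, 4);	/* 4-byte backplane read */
	if (bcmsdh_regfail(sdh))
		return -1;
	printf("reg 0x%x = 0x%x\n", core_addr, val);

	/* fixed-address (FIFO style) CMD53 write to function 2; async is
	 * documented as unsupported, so completion callback and handle are NULL */
	return bcmsdh_send_buf(sdh, fifo_addr, 2,
	                       SDIO_REQ_4BYTE | SDIO_REQ_FIXED,
	                       buf, nbytes, NULL, NULL, NULL);
}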
*/ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh); + +/* callback functions */ +typedef struct { + /* attach to device */ + void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* detach from device */ + void (*detach)(void *ch); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +#if defined(OOB_INTR_ONLY) +extern int bcmsdh_register_oob_intr(void * dhdp); +extern void bcmsdh_unregister_oob_intr(void); +extern void bcmsdh_oob_intr_set(bool enable); +#endif /* defined(OOB_INTR_ONLY) */ +/* Function to pass device-status bits to DHD. */ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + + +#endif /* _bcmsdh_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h new file mode 100644 index 000000000000..4e6d1b5bd94f --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h @@ -0,0 +1,122 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
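The bcmsdh_driver_t callback pair above is how a client (for example the DHD glue) plugs into bcmsdh. A sketch under stated assumptions: my_probe(), my_remove() and the init/exit wrappers are hypothetical names, and the returned pointer is a placeholder for real per-device state.

static int my_dev_dummy;	/* stand-in for real per-device state */

static void *my_probe(uint16 venid, uint16 devid, uint16 bus, uint16 slot,
                      uint16 func, uint bustype, void *regsva, osl_t *osh,
                      void *param)
{
	/* allocate per-device state, map registers, start the dongle ... */
	return &my_dev_dummy;
}

static void my_remove(void *ctx)
{
	/* undo whatever my_probe() set up */
}

static bcmsdh_driver_t my_driver = { my_probe, my_remove };

static int my_glue_init(void)
{
	return bcmsdh_register(&my_driver);	/* typically from module init */
}

static void my_glue_exit(void)
{
	bcmsdh_unregister();			/* typically from module exit */
}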
+ * + * $Id: bcmsdh_sdmmc.h,v 13.1.2.1.8.7 2009/10/27 18:22:52 Exp $ + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +/* Allocate/init/free per-OS private data */ +extern int sdioh_sdmmc_osinit(sdioh_info_t *sd); +extern void sdioh_sdmmc_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! */ + +struct sdioh_info { + osl_t *osh; /* osh handler */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + uint max_dma_len; + uint max_dma_descriptors; /* DMA Descriptors supported by this controller. 
*/ +// SDDMA_DESCRIPTOR SGList[32]; /* Scatter/Gather DMA List */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +typedef struct _BCMSDH_SDMMC_INSTANCE { + sdioh_info_t *sd; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; +} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE; + +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/drivers/net/wireless/bcm4329/include/bcmsdpcm.h b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h new file mode 100644 index 000000000000..77aca4500ad8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h @@ -0,0 +1,263 @@ +/* + * Broadcom SDIO/PCMCIA + * Software-specific definitions shared between device and host side + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdpcm.h,v 1.1.2.4 2010/07/02 01:15:46 Exp $ + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ +#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 1 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 2 /* we're ready to to talk to host after enable */ +#define HMB_DATA_FC 4 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 8 /* firmware is ready for protocol activity */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ +#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ + +#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ +#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ + +/* + * Software-defined protocol header + */ + +/* Current protocol version */ +#define SDPCM_PROT_VERSION 4 + +/* SW frame header */ +#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */ +#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */ + +#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */ +#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */ +#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */ + +#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */ +#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */ +#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */ + +/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */ +#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */ +#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */ +#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */ +#define SDPCM_NEXTLEN_OFFSET 2 + +/* 
Data Offset from SOF (HW Tag, SW Tag, Pad) */ +#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ +#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) +#define SDPCM_DOFFSET_MASK 0xff000000 +#define SDPCM_DOFFSET_SHIFT 24 + +#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ +#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff) +#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ +#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) +#define SDPCM_VERSION_OFFSET 6 /* Version # */ +#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff) +#define SDPCM_UNUSED_OFFSET 7 /* Spare */ +#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff) + +#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ + +/* logical channel numbers */ +#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */ +#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ +#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */ +#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ +#define SDPCM_MAX_CHANNEL 15 + +#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */ + +#define SDPCM_FLAG_RESVD0 0x01 +#define SDPCM_FLAG_RESVD1 0x02 +#define SDPCM_FLAG_GSPI_TXENAB 0x04 +#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */ + +/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */ +#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT) + +#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80) + +/* For TEST_CHANNEL packets, define another 4-byte header */ +#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2); + * Semantics of Ext byte depend on command. + * Len is current or requested frame length, not + * including test header; sent little-endian. + */ +#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */ +#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */ +#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */ +#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */ +#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. 
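A small sketch of decoding the 8-byte software header with the accessor macros above; swheader must point at the start of the SW header (past the hardware length tag), as the macro comments require:

static void sdpcm_show_swheader(uint8 *swheader)
{
	uint8 seq      = SDPCM_PACKET_SEQUENCE(swheader);
	uint8 chan     = SDPCM_PACKET_CHANNEL(swheader);
	uint16 nextlen = SDPCM_NEXTLEN_VALUE(swheader);	/* next-frame hint in bytes (stored on the wire in 16-byte units) */
	uint8 doff     = SDPCM_DOFFSET_VALUE(swheader);	/* payload offset measured from start of frame (SOF) */

	printf("seq %u chan %u nextlen %u doff %u\n", seq, chan, nextlen, doff);

	switch (chan) {
	case SDPCM_CONTROL_CHANNEL:	/* ioctl/iovar response */
	case SDPCM_EVENT_CHANNEL:	/* asynchronous event */
	case SDPCM_DATA_CHANNEL:	/* normal data frame */
	case SDPCM_GLOM_CHANNEL:	/* superframe of glommed packets */
	default:
		break;
	}
}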
Ext is boolean on/off */ + +/* Handy macro for filling in datagen packets with a pattern */ +#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) + +/* + * Software counters (first part matches hardware counters) + */ + +typedef volatile struct { + uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ + uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ + uint32 wroutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ + uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ + uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ + uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ + uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ + uint32 rxdescuflo; /* receive descriptor underflows */ + uint32 rxfifooflo; /* receive fifo overflows */ + uint32 txfifouflo; /* transmit fifo underflows */ + uint32 runt; /* runt (too short) frames recv'd from bus */ + uint32 badlen; /* frame's rxh len does not match its hw tag len */ + uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ + uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ + uint32 rxfcrc; /* frame rx header indicates crc error */ + uint32 rxfwoos; /* frame rx header indicates write out of sync */ + uint32 rxfwft; /* frame rx header indicates write frame termination */ + uint32 rxfabort; /* frame rx header indicates frame aborted */ + uint32 woosint; /* write out of sync interrupt */ + uint32 roosint; /* read out of sync interrupt */ + uint32 rftermint; /* read frame terminate interrupt */ + uint32 wftermint; /* write frame terminate interrupt */ +} sdpcmd_cnt_t; + +/* + * Register Access Macros + */ + +#define SDIODREV_IS(var, val) ((var) == (val)) +#define SDIODREV_GE(var, val) ((var) >= (val)) +#define SDIODREV_GT(var, val) ((var) > (val)) +#define SDIODREV_LT(var, val) ((var) < (val)) +#define SDIODREV_LE(var, val) ((var) <= (val)) + +#define SDIODDMAREG32(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) + +#define SDIODDMAREG64(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) + +#define SDIODDMAREG(h, dir, chnl) \ + (SDIODREV_LT((h)->corerev, 1) ? \ + SDIODDMAREG32((h), (dir), (chnl)) : \ + SDIODDMAREG64((h), (dir), (chnl))) + +#define PCMDDMAREG(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) + +#define SDPCMDMAREG(h, dir, chnl, coreid) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? 
\ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host + * The structure contains pointers to trap or assert information shared with the host + */ +#define SDPCM_SHARED_VERSION 0x0002 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hndrte_cons_t */ + uint32 msgtrace_addr; + uint8 tag[32]; +} sdpcm_shared_t; + +extern sdpcm_shared_t sdpcm_shared; + +#endif /* _bcmsdpcm_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/bcmsdspi.h b/drivers/net/wireless/bcm4329/include/bcmsdspi.h new file mode 100644 index 000000000000..eaae10d8bf19 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdspi.h @@ -0,0 +1,131 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdspi.h,v 13.8.10.2 2008/06/30 21:09:40 Exp $ + */ + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of sdspi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + bool got_hcint; /* Host Controller interrupt. 
*/ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); diff --git a/drivers/net/wireless/bcm4329/include/bcmsdstd.h b/drivers/net/wireless/bcm4329/include/bcmsdstd.h new file mode 100644 index 000000000000..974b3d41698d --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmsdstd.h @@ -0,0 +1,223 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdstd.h,v 13.16.18.1.16.3 2009/12/10 01:09:23 Exp $ + */ + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#define BCMSDYIELD + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#define RETRIES_SMALL 100 + + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! */ + + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + ulong dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + ulong dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + ulong adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE)) + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
*/ +#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */ + +/* ADMA2 Descriptor Table Entry for 32-bit Address */ +typedef struct adma2_dscr_32b { + uint32 len_attr; + uint32 phys_addr; +} adma2_dscr_32b_t; + +/* ADMA1 Descriptor Table Entry */ +typedef struct adma1_dscr { + uint32 phys_addr_attr; +} adma1_dscr_t; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); + +/* OS-specific wait-for-interrupt-or-status */ +extern uint16 sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield); diff --git a/drivers/net/wireless/bcm4329/include/bcmspi.h b/drivers/net/wireless/bcm4329/include/bcmspi.h new file mode 100644 index 000000000000..2e2bc935716f --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmspi.h @@ -0,0 +1,36 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
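A sketch of filling one 32-bit ADMA2 descriptor line for a single, final data transfer. Placing the byte count in the upper 16 bits of len_attr follows the standard SD Host Controller ADMA2 layout; the header only names the attribute bits, so treat that placement as an assumption.

static void adma2_fill_last(adma2_dscr_32b_t *d, uint32 phys, uint16 nbytes)
{
	d->len_attr = ((uint32)nbytes << 16) |	/* byte count (assumed upper 16 bits) */
	              ADMA2_ATTRIBUTE_ACT_TRAN |	/* action: transfer data */
	              ADMA2_ATTRIBUTE_END |		/* last line in the descriptor table */
	              ADMA2_ATTRIBUTE_VALID;		/* line is valid */
	d->phys_addr = phys;				/* physical address of the data buffer */
}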
+ * + * $Id: bcmspi.h,v 13.3.10.2 2008/06/30 21:09:40 Exp $ + */ + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); diff --git a/drivers/net/wireless/bcm4329/include/bcmspibrcm.h b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h new file mode 100644 index 000000000000..9dce878d11e2 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h @@ -0,0 +1,134 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 2010, Broadcom Corporation + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. + * + * $Id: bcmspibrcm.h,v 1.4.4.1.4.3.6.1 2008/09/27 17:03:25 Exp $ + */ + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_F1 64 +#define BLOCK_SIZE_F2 2048 +#define BLOCK_SIZE_F3 2048 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 +#define ERROR_UF 2 +#define ERROR_OF 3 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + void *bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of spi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 card_dstatus; /* 32bit device status */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SPI_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + uint32 wordlen; /* host processor 16/32bits */ + uint32 prev_fun; + uint32 chip; + uint32 chiprev; + bool resp_delay_all; + bool dwordmode; + + struct spierrstats_t spierrstats; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmspibrcm.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmspibrcm.c references to per-port code + */ + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */ +#define SPI_RW_FLAG_S 31 +#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */ +#define SPI_ACCESS_S 30 +#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */ +#define SPI_FUNCTION_S 28 +#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */ +#define SPI_REG_ADDR_S 11 +#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */ +#define SPI_LEN_S 0 diff --git a/drivers/net/wireless/bcm4329/include/bcmutils.h b/drivers/net/wireless/bcm4329/include/bcmutils.h new file mode 100644 index 000000000000..f85ed351d663 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmutils.h @@ -0,0 +1,637 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
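The SPI_*_M/_S pairs above describe the 32-bit gSPI command word layout. A sketch of composing such a word, assuming BITFIELD_MASK(width) expands to ((1 << width) - 1) as elsewhere in this codebase, and that 1 means "write" for the R/W bit and "incremental" for the access bit:

static uint32 gspi_cmd_word(uint32 write, uint32 incr, uint32 func,
                            uint32 addr, uint32 len)
{
	return ((write & SPI_RW_FLAG_M)  << SPI_RW_FLAG_S)  |	/* bit 31: R/W */
	       ((incr  & SPI_ACCESS_M)   << SPI_ACCESS_S)   |	/* bit 30: fixed/incr */
	       ((func  & SPI_FUNCTION_M) << SPI_FUNCTION_S) |	/* bits 29:28: function */
	       ((addr  & SPI_REG_ADDR_M) << SPI_REG_ADDR_S) |	/* bits 27:11: address */
	       ((len   & SPI_LEN_M)      << SPI_LEN_S);		/* bits 10:0: length */
}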
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: bcmutils.h,v 13.184.4.6.2.1.18.25 2010/04/26 06:05:24 Exp $ + */ + + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + +#ifdef __cplusplus +extern "C" { +#endif + + +#define _BCM_U 0x01 +#define _BCM_L 0x02 +#define _BCM_D 0x04 +#define _BCM_C 0x08 +#define _BCM_P 0x10 +#define _BCM_S 0x20 +#define _BCM_X 0x40 +#define _BCM_SP 0x80 + +extern const unsigned char bcm_ctype[]; +#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) + + + +struct bcmstrbuf { + char *buf; + unsigned int size; + char *origbuf; + unsigned int origsize; +}; + + +#ifdef BCMDRIVER +#include <osl.h> + +#define GPIO_PIN_NOTDEFINED 0x20 + + +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) {\ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ +} + + + +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128 +#endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 +#endif + +typedef struct pktq_prec { + void *head; + void *tail; + uint16 len; + uint16 max; +} pktq_prec_t; + + + +struct pktq { + uint16 num_prec; + uint16 hi_prec; + uint16 max; + uint16 len; + + struct pktq_prec q[PKTQ_MAX_PREC]; +}; + + +struct spktq { + uint16 num_prec; + uint16 hi_prec; + uint16 max; + uint16 len; + + struct pktq_prec q[1]; +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) + + + + +struct ether_addr; + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + + + +#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) +#define pktq_plen(pq, prec) ((pq)->q[prec].len) +#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) +#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) +#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) + +#define pktq_ppeek(pq, prec) ((pq)->q[prec].head) +#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) + +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); + +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + + +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir); + +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir); + + + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); + + + +#define 
pktq_len(pq) ((int)(pq)->len) +#define pktq_max(pq) ((int)(pq)->max) +#define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) +#define pktq_full(pq) ((pq)->len >= (pq)->max) +#define pktq_empty(pq) ((pq)->len == 0) + + +#define pktenq(pq, p) pktq_penq(((struct pktq *)pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq(((struct pktq *)pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)pq), 0) +#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len) + +extern void pktq_init(struct pktq *pq, int num_prec, int max_len); + +extern void *pktq_deq(struct pktq *pq, int *prec_out); +extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); + + + +extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); + + +extern uint pktsetprio(void *pkt, bool update_vtag); +#define PKTPRIO_VDSCP 0x100 +#define PKTPRIO_VLAN 0x200 +#define PKTPRIO_UPD 0x400 +#define PKTPRIO_DSCP 0x800 + + +extern int bcm_atoi(char *s); +extern ulong bcm_strtoul(char *cp, char **endp, uint base); +extern char *bcmstrstr(char *haystack, char *needle); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + + + +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(char *p, struct ether_addr *ea); + + +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); + + +extern void bcm_mdelay(uint ms); + +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' +#define bcmdumplogent(buf, idx) -1 + +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) + + + + +typedef struct bcm_iovar { + const char *name; + uint16 varid; + uint16 flags; + uint16 type; + uint16 minlen; +} bcm_iovar_t; + + + + +#define IOV_GET 0 +#define IOV_SET 1 + + +#define IOV_GVAL(id) ((id)*2) +#define IOV_SVAL(id) (((id)*2)+IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) + + + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); + +#endif + + +#define IOVT_VOID 0 +#define IOVT_BOOL 1 +#define IOVT_INT8 2 +#define IOVT_UINT8 3 +#define IOVT_INT16 4 +#define IOVT_UINT16 5 +#define IOVT_INT32 6 +#define IOVT_UINT32 7 +#define IOVT_BUFFER 8 +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + + +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == 
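A minimal sketch of the precedence queue API declared above; PKTFREE() is the packet-free macro from osl.h, and treating precedence 3 as "highest" is only a convention of this example:

static void pktq_example(osl_t *osh, void *pkt)
{
	struct pktq txq;
	void *p;
	int prec_out;

	pktq_init(&txq, 4, PKTQ_LEN_DEFAULT);	/* 4 precedence levels */

	if (!pktq_pfull(&txq, 3))
		pktq_penq(&txq, 3, pkt);	/* enqueue at precedence 3 */

	p = pktq_deq(&txq, &prec_out);		/* dequeue, highest precedence first */
	if (p != NULL)
		PKTFREE(osh, p, TRUE);
}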
IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + (type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + + + +#define BCME_STRLEN 64 +#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST)) + + + + +#define BCME_OK 0 +#define BCME_ERROR -1 +#define BCME_BADARG -2 +#define BCME_BADOPTION -3 +#define BCME_NOTUP -4 +#define BCME_NOTDOWN -5 +#define BCME_NOTAP -6 +#define BCME_NOTSTA -7 +#define BCME_BADKEYIDX -8 +#define BCME_RADIOOFF -9 +#define BCME_NOTBANDLOCKED -10 +#define BCME_NOCLK -11 +#define BCME_BADRATESET -12 +#define BCME_BADBAND -13 +#define BCME_BUFTOOSHORT -14 +#define BCME_BUFTOOLONG -15 +#define BCME_BUSY -16 +#define BCME_NOTASSOCIATED -17 +#define BCME_BADSSIDLEN -18 +#define BCME_OUTOFRANGECHAN -19 +#define BCME_BADCHAN -20 +#define BCME_BADADDR -21 +#define BCME_NORESOURCE -22 +#define BCME_UNSUPPORTED -23 +#define BCME_BADLEN -24 +#define BCME_NOTREADY -25 +#define BCME_EPERM -26 +#define BCME_NOMEM -27 +#define BCME_ASSOCIATED -28 +#define BCME_RANGE -29 +#define BCME_NOTFOUND -30 +#define BCME_WME_NOT_ENABLED -31 +#define BCME_TSPEC_NOTFOUND -32 +#define BCME_ACM_NOTSUPPORTED -33 +#define BCME_NOT_WME_ASSOCIATION -34 +#define BCME_SDIO_ERROR -35 +#define BCME_DONGLE_DOWN -36 +#define BCME_VERSION -37 +#define BCME_TXFAIL -38 +#define BCME_RXFAIL -39 +#define BCME_NODEVICE -40 +#define BCME_UNFINISHED -41 +#define BCME_LAST BCME_UNFINISHED + + +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "Command not finished", \ +} + +#ifndef ABS +#define ABS(a) (((a) < 0)?-(a):(a)) +#endif + +#ifndef MIN +#define MIN(a, b) (((a) < (b))?(a):(b)) +#endif + +#ifndef MAX +#define MAX(a, b) (((a) > (b))?(a):(b)) +#endif + +#define CEIL(x, y) (((x) + ((y)-1)) / (y)) +#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y)) +#define ISALIGNED(a, x) (((a) & ((x)-1)) == 0) +#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0) +#define VALID_MASK(mask) !((mask) & ((mask) + 1)) +#ifndef OFFSETOF +#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) +#endif +#ifndef ARRAYSIZE +#define ARRAYSIZE(a) (sizeof(a)/sizeof(a[0])) +#endif + + +#ifndef setbit +#ifndef NBBY +#define NBBY 8 +#endif +#define setbit(a, i) (((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY)) +#define clrbit(a, i) (((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY))) +#define isset(a, i) (((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) +#define isclr(a, i) ((((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0) +#endif + +#define NBITS(type) (sizeof(type) * 8) +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define 
MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + + +#define MUX(pred, true, false) ((pred) ? (true) : (false)) + + +#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) +#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) + + +#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) +#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) + + +#define MODADD(x, y, bound) \ + MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) +#define MODSUB(x, y, bound) \ + MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) + + +#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) +#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) + + +#define CRC8_INIT_VALUE 0xff +#define CRC8_GOOD_VALUE 0x9f +#define CRC16_INIT_VALUE 0xffff +#define CRC16_GOOD_VALUE 0xf0b8 +#define CRC32_INIT_VALUE 0xffffffff +#define CRC32_GOOD_VALUE 0xdebb20e3 + + +typedef struct bcm_bit_desc { + uint32 bit; + const char* name; +} bcm_bit_desc_t; + + +typedef struct bcm_tlv { + uint8 id; + uint8 len; + uint8 data[1]; +} bcm_tlv_t; + + +#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len)) + + +#define ETHER_ADDR_STR_LEN 18 + + +#ifdef IL_BIGENDIAN +static INLINE uint32 +load32_ua(uint8 *a) +{ + return ((a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]); +} + +static INLINE void +store32_ua(uint8 *a, uint32 v) +{ + a[0] = (v >> 24) & 0xff; + a[1] = (v >> 16) & 0xff; + a[2] = (v >> 8) & 0xff; + a[3] = v & 0xff; +} + +static INLINE uint16 +load16_ua(uint8 *a) +{ + return ((a[0] << 8) | a[1]); +} + +static INLINE void +store16_ua(uint8 *a, uint16 v) +{ + a[0] = (v >> 8) & 0xff; + a[1] = v & 0xff; +} + +#else + +static INLINE uint32 +load32_ua(uint8 *a) +{ + return ((a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]); +} + +static INLINE void +store32_ua(uint8 *a, uint32 v) +{ + a[3] = (v >> 24) & 0xff; + a[2] = (v >> 16) & 0xff; + a[1] = (v >> 8) & 0xff; + a[0] = v & 0xff; +} + +static INLINE uint16 +load16_ua(uint8 *a) +{ + return ((a[1] << 8) | a[0]); +} + +static INLINE void +store16_ua(uint8 *a, uint16 v) +{ + a[1] = (v >> 8) & 0xff; + a[0] = v & 0xff; +} + +#endif + + + +static INLINE void +xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) +{ + if ( +#ifdef __i386__ + 1 || +#endif + (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { + + + ((uint32 *)dst)[0] = ((uint32 *)src1)[0] ^ ((uint32 *)src2)[0]; + ((uint32 *)dst)[1] = ((uint32 *)src1)[1] ^ ((uint32 *)src2)[1]; + ((uint32 *)dst)[2] = ((uint32 *)src1)[2] ^ ((uint32 *)src2)[2]; + ((uint32 *)dst)[3] = ((uint32 *)src1)[3] ^ ((uint32 *)src2)[3]; + } else { + + int k; + for (k = 0; k < 16; k++) + dst[k] = src1[k] ^ src2[k]; + } +} + + + +extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc); +extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc); +extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc); + +#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \ + defined(WLMSG_ASSOC) +extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len); +extern int bcm_format_hex(char *str, const void *bytes, int len); +extern void prhex(const char *msg, uchar *buf, uint len); +#endif +extern char *bcm_brev_str(uint32 brev, char *buf); +extern void printbig(char *buf); + + +extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); +extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); +extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key); + + +extern const char *bcmerrorstr(int 
bcmerror); + + +typedef uint32 mbool; +#define mboolset(mb, bit) ((mb) |= (bit)) +#define mboolclr(mb, bit) ((mb) &= ~(bit)) +#define mboolisset(mb, bit) (((mb) & (bit)) != 0) +#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) + + +extern uint16 bcm_qdbm_to_mw(uint8 qdbm); +extern uint8 bcm_mw_to_qdbm(uint16 mw); + + +struct fielddesc { + const char *nameandfmt; + uint32 offset; + uint32 len; +}; + +extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); +extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); +extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes); +extern void bcm_print_bytes(char *name, const uchar *cdata, int len); + +typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); +extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, + char *buf, uint32 bufsize); + +extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); +extern uint bcm_bitcount(uint8 *bitmap, uint bytelength); + +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif + + +#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) + +#ifdef __cplusplus + } +#endif + +#endif diff --git a/drivers/net/wireless/bcm4329/include/bcmwifi.h b/drivers/net/wireless/bcm4329/include/bcmwifi.h new file mode 100644 index 000000000000..038aedcdb3c8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/bcmwifi.h @@ -0,0 +1,154 @@ +/* + * Misc utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
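
Editor's note: the bcmutils.h helpers above are mostly thin macros; the following is a minimal, hypothetical sketch (not part of the patch) of how the bit, alignment and mbool helpers compose. It assumes this driver's typedefs.h/bcmutils.h are on the include path.

/* Hypothetical usage sketch of the bcmutils.h macros defined above. */
#include <typedefs.h>
#include <bcmutils.h>

static void bcmutils_macro_demo(void)
{
	uint8 bitmap[4] = {0};       /* 32 flag bits stored as bytes */
	mbool busy_reasons = 0;
	uint len = 30;

	setbit(bitmap, 9);           /* sets bit 1 of bitmap[1] */
	if (isset(bitmap, 9))
		len = ROUNDUP(len, 4);   /* 30 -> 32 */

	mboolset(busy_reasons, 0x2);
	if (mboolisset(busy_reasons, 0x2) && ISALIGNED(len, 4))
		mboolclr(busy_reasons, 0x2);
}
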
+ * $Id: bcmwifi.h,v 1.15.30.4 2010/03/10 20:10:52 Exp $ + */ + + +#ifndef _bcmwifi_h_ +#define _bcmwifi_h_ + + + +typedef uint16 chanspec_t; + + +#define CH_UPPER_SB 0x01 +#define CH_LOWER_SB 0x02 +#define CH_EWA_VALID 0x04 +#define CH_20MHZ_APART 4 +#define CH_10MHZ_APART 2 +#define CH_5MHZ_APART 1 +#define CH_MAX_2G_CHANNEL 14 +#define WLC_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL +#define MAXCHANNEL 224 + +#define WL_CHANSPEC_CHAN_MASK 0x00ff +#define WL_CHANSPEC_CHAN_SHIFT 0 + +#define WL_CHANSPEC_CTL_SB_MASK 0x0300 +#define WL_CHANSPEC_CTL_SB_SHIFT 8 +#define WL_CHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_CHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_CHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_CHANSPEC_BW_MASK 0x0C00 +#define WL_CHANSPEC_BW_SHIFT 10 +#define WL_CHANSPEC_BW_10 0x0400 +#define WL_CHANSPEC_BW_20 0x0800 +#define WL_CHANSPEC_BW_40 0x0C00 + +#define WL_CHANSPEC_BAND_MASK 0xf000 +#define WL_CHANSPEC_BAND_SHIFT 12 +#define WL_CHANSPEC_BAND_5G 0x1000 +#define WL_CHANSPEC_BAND_2G 0x2000 +#define INVCHANSPEC 255 + + +#define WF_CHAN_FACTOR_2_4_G 4814 +#define WF_CHAN_FACTOR_5_G 10000 +#define WF_CHAN_FACTOR_4_G 8000 + + +#define LOWER_20_SB(channel) ((channel > CH_10MHZ_APART) ? (channel - CH_10MHZ_APART) : 0) +#define UPPER_20_SB(channel) ((channel < (MAXCHANNEL - CH_10MHZ_APART)) ? \ + (channel + CH_10MHZ_APART) : 0) +#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define NEXT_20MHZ_CHAN(channel) ((channel < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + (channel + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ + WL_CHANSPEC_BAND_5G)) +#define CHSPEC_CHANNEL(chspec) ((uint8)(chspec & WL_CHANSPEC_CHAN_MASK)) +#define CHSPEC_BAND(chspec) (chspec & WL_CHANSPEC_BAND_MASK) + +#ifdef WL20MHZ_ONLY + +#define CHSPEC_CTL_SB(chspec) WL_CHANSPEC_CTL_SB_NONE +#define CHSPEC_BW(chspec) WL_CHANSPEC_BW_20 +#define CHSPEC_IS10(chspec) 0 +#define CHSPEC_IS20(chspec) 1 +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) 0 +#endif + +#else + +#define CHSPEC_CTL_SB(chspec) (chspec & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) (chspec & WL_CHANSPEC_BW_MASK) +#define CHSPEC_IS10(chspec) ((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) +#define CHSPEC_IS20(chspec) ((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif + +#endif + +#define CHSPEC_IS5G(chspec) ((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) +#define CHSPEC_IS2G(chspec) ((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) +#define CHSPEC_SB_NONE(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE) +#define CHSPEC_SB_UPPER(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) +#define CHSPEC_SB_LOWER(chspec) ((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) +#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \ + (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \ + (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK)))) + +#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G((chspec))? 
WLC_BAND_5G: WLC_BAND_2G) + +#define CHANSPEC_STR_LEN 8 + + +#define WLC_MAXRATE 108 +#define WLC_RATE_1M 2 +#define WLC_RATE_2M 4 +#define WLC_RATE_5M5 11 +#define WLC_RATE_11M 22 +#define WLC_RATE_6M 12 +#define WLC_RATE_9M 18 +#define WLC_RATE_12M 24 +#define WLC_RATE_18M 36 +#define WLC_RATE_24M 48 +#define WLC_RATE_36M 72 +#define WLC_RATE_48M 96 +#define WLC_RATE_54M 108 + +#define WLC_2G_25MHZ_OFFSET 5 + + +extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf); + + +extern chanspec_t wf_chspec_aton(char *a); + + +extern int wf_mhz2channel(uint freq, uint start_factor); + + +extern int wf_channel2mhz(uint channel, uint start_factor); + +#endif diff --git a/drivers/net/wireless/bcm4329/include/dhdioctl.h b/drivers/net/wireless/bcm4329/include/dhdioctl.h new file mode 100644 index 000000000000..980a14301003 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/dhdioctl.h @@ -0,0 +1,123 @@ +/* + * Definitions for ioctls to access DHD iovars. + * Based on wlioctl.h (for Broadcom 802.11abg driver). + * (Moves towards generic ioctls for BCM drivers/iovars.) + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
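
Editor's note: a short, hedged sketch of how the chanspec macros above fit together; the driver's real callers live in wl/dhd code not shown in this patch, and the function below is hypothetical.

/* Hypothetical example: build a 20 MHz, 2.4 GHz chanspec for channel 6 and
 * pull it apart again with the accessor macros defined above. */
#include <typedefs.h>
#include <bcmwifi.h>

static void chanspec_demo(void)
{
	chanspec_t cs = CH20MHZ_CHSPEC(6);      /* channel | BW_20 | SB_NONE | BAND_2G */
	char name[CHANSPEC_STR_LEN];

	if (CHSPEC_IS2G(cs) && CHSPEC_IS20(cs)) {
		/* channel 6 in the 2.4 GHz band sits at 2437 MHz */
		int mhz = wf_channel2mhz(CHSPEC_CHANNEL(cs), WF_CHAN_FACTOR_2_4_G);
		(void)mhz;
	}
	wf_chspec_ntoa(cs, name);               /* printable form of the chanspec */
}
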
+ * + * $Id: dhdioctl.h,v 13.7.8.1.4.1.16.5 2010/05/21 21:49:38 Exp $ + */ + +#ifndef _dhdioctl_h_ +#define _dhdioctl_h_ + +#include <typedefs.h> + + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include <packed_section_start.h> + + +/* Linux network driver ioctl encoding */ +typedef struct dhd_ioctl { + uint cmd; /* common ioctl definition */ + void *buf; /* pointer to user buffer */ + uint len; /* length of user buffer */ + bool set; /* get or set request (optional) */ + uint used; /* bytes read or written (optional) */ + uint needed; /* bytes needed (optional) */ + uint driver; /* to identify target driver */ +} dhd_ioctl_t; + +/* per-driver magic numbers */ +#define DHD_IOCTL_MAGIC 0x00444944 + +/* bump this number if you change the ioctl interface */ +#define DHD_IOCTL_VERSION 1 + +#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ +#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ + +/* common ioctl definitions */ +#define DHD_GET_MAGIC 0 +#define DHD_GET_VERSION 1 +#define DHD_GET_VAR 2 +#define DHD_SET_VAR 3 + +/* message levels */ +#define DHD_ERROR_VAL 0x0001 +#define DHD_TRACE_VAL 0x0002 +#define DHD_INFO_VAL 0x0004 +#define DHD_DATA_VAL 0x0008 +#define DHD_CTL_VAL 0x0010 +#define DHD_TIMER_VAL 0x0020 +#define DHD_HDRS_VAL 0x0040 +#define DHD_BYTES_VAL 0x0080 +#define DHD_INTR_VAL 0x0100 +#define DHD_LOG_VAL 0x0200 +#define DHD_GLOM_VAL 0x0400 +#define DHD_EVENT_VAL 0x0800 +#define DHD_BTA_VAL 0x1000 +#define DHD_ISCAN_VAL 0x2000 + +#ifdef SDTEST +/* For pktgen iovar */ +typedef struct dhd_pktgen { + uint version; /* To allow structure change tracking */ + uint freq; /* Max ticks between tx/rx attempts */ + uint count; /* Test packets to send/rcv each attempt */ + uint print; /* Print counts every <print> attempts */ + uint total; /* Total packets (or bursts) */ + uint minlen; /* Minimum length of packets to send */ + uint maxlen; /* Maximum length of packets to send */ + uint numsent; /* Count of test packets sent */ + uint numrcvd; /* Count of test packets received */ + uint numfail; /* Count of test send failures */ + uint mode; /* Test mode (type of test packets) */ + uint stop; /* Stop after this many tx failures */ +} dhd_pktgen_t; + +/* Version in case structure changes */ +#define DHD_PKTGEN_VERSION 2 + +/* Type of test packets to use */ +#define DHD_PKTGEN_ECHO 1 /* Send echo requests */ +#define DHD_PKTGEN_SEND 2 /* Send discard packets */ +#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */ +#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */ +#endif /* SDTEST */ + +/* Enter idle immediately (no timeout) */ +#define DHD_IDLE_IMMEDIATE (-1) + +/* Values for idleclock iovar: other values are the sd_divisor to use when idle */ +#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */ +#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ + + +/* require default structure packing */ +#include <packed_section_end.h> + + +#endif /* _dhdioctl_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/epivers.h b/drivers/net/wireless/bcm4329/include/epivers.h new file mode 100644 index 000000000000..92dc32635a25 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/epivers.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General 
Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: epivers.h.in,v 13.25 2005/10/28 18:35:33 Exp $ + * +*/ + + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 4 + +#define EPI_MINOR_VERSION 218 + +#define EPI_RC_NUMBER 248 + +#define EPI_INCREMENTAL_NUMBER 6 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 4, 218, 248, 6 + +#define EPI_VERSION_NUM 0x04daf806 + + +#define EPI_VERSION_STR "4.218.248.6" +#define EPI_ROUTER_VERSION_STR "4.219.248.6" + +#endif diff --git a/drivers/net/wireless/bcm4329/include/hndpmu.h b/drivers/net/wireless/bcm4329/include/hndpmu.h new file mode 100644 index 000000000000..e829b3df2d0b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/hndpmu.h @@ -0,0 +1,34 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndpmu.h,v 13.14.4.3.4.3.8.7 2010/04/09 13:20:51 Exp $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h new file mode 100644 index 000000000000..ca3281b6d901 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h @@ -0,0 +1,88 @@ +/* + * HNDRTE arm trap handling. 
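
Editor's note: as a hedged illustration of the dhd_ioctl_t defined above, a userspace tool would typically wrap a GET_VAR request roughly as follows. The name-first buffer layout and the surrounding private-ioctl transport are assumptions; neither is shown in this patch.

/* Hypothetical helper: prepare a DHD_GET_VAR request for an iovar by name. */
#include <string.h>
#include <dhdioctl.h>

static void dhd_prep_get_var(dhd_ioctl_t *ioc, char *buf, uint buflen, const char *name)
{
	memset(ioc, 0, sizeof(*ioc));
	memset(buf, 0, buflen);
	strncpy(buf, name, buflen - 1);   /* assumed convention: NUL-terminated name first */
	ioc->cmd = DHD_GET_VAR;
	ioc->buf = buf;
	ioc->len = buflen;
	ioc->set = FALSE;
	ioc->driver = DHD_IOCTL_MAGIC;    /* routes the request to the dhd driver */
}
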
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndrte_armtrap.h,v 13.3.196.2 2010/07/15 19:06:11 Exp $ + */ + +#ifndef _hndrte_armtrap_h +#define _hndrte_armtrap_h + + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_4T__) +#define MAX_TRAP_TYPE (TR_FIQ + 1) +#elif defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +#define TRAP_T_SIZE 80 + +#ifndef _LANGUAGE_ASSEMBLY + +#include <typedefs.h> + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; + uint32 r1; + uint32 r2; + uint32 r3; + uint32 r4; + uint32 r5; + uint32 r6; + uint32 r7; + uint32 r8; + uint32 r9; + uint32 r10; + uint32 r11; + uint32 r12; + uint32 r13; + uint32 r14; + uint32 pc; +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hndrte_armtrap_h */ diff --git a/drivers/net/wireless/bcm4329/include/hndrte_cons.h b/drivers/net/wireless/bcm4329/include/hndrte_cons.h new file mode 100644 index 000000000000..a42417478a16 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/hndrte_cons.h @@ -0,0 +1,63 @@ +/* + * Console support for hndrte. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndrte_cons.h,v 13.1.2.4 2010/07/15 19:06:11 Exp $ + */ + +#include <typedefs.h> + +#define CBUF_LEN (128) + +#define LOG_BUF_LEN 1024 + +typedef struct { + uint32 buf; /* Can't be pointer on (64-bit) hosts */ + uint buf_size; + uint idx; + char *_buf_compat; /* Redundant pointer for backward compat. */ +} hndrte_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in. + * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hndrte_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until <CR> is received, then + * the buffer is processed as a command line. Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hndrte_cons_t; diff --git a/drivers/net/wireless/bcm4329/include/hndsoc.h b/drivers/net/wireless/bcm4329/include/hndsoc.h new file mode 100644 index 000000000000..35424175f55e --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/hndsoc.h @@ -0,0 +1,195 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndsoc.h,v 13.3.10.3 2008/08/06 03:43:25 Exp $ + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include <sbconfig.h> +#include <aidmp.h> + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. 
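
Editor's note: a hedged sketch of how a host could consume the hndrte_cons_t log ring described above, assuming it already holds a snapshot of the structure and of the dongle's log buffer; the bus-specific copy mechanism is not shown here.

/* Hypothetical console poller: emit any bytes added since the last poll. */
#include <stdio.h>
#include <typedefs.h>
#include <hndrte_cons.h>

static uint last_idx;   /* host-side read cursor into the ring */

static void poll_console(const hndrte_cons_t *cons, const char *log_copy)
{
	uint idx = cons->log.idx;

	while (last_idx != idx) {
		putchar(log_copy[last_idx]);
		last_idx = (last_idx + 1) % cons->log.buf_size;
	}
}
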
+ */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ + +#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ +#ifndef SI_MAXCORES +#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ +#endif + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express 
core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define QNPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 + +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 +#define SOCI_AI 1 + +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + +/* A register that is common to all cores to + * communicate w/PMU regarding clock control. 
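
Editor's note: the enumeration-space constants above imply a simple layout; a minimal sketch under that assumption (the helper name is made up).

/* Each core's 4 KB register window follows the previous one in enumeration
 * space, with chipcommon (SI_CC_IDX == 0) at SI_ENUM_BASE itself. */
#include <typedefs.h>
#include <hndsoc.h>

static uint32 si_core_regs_pa(uint coreidx)
{
	return SI_ENUM_BASE + coreidx * SI_CORE_SIZE;
}
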
+ */ +#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ + +/* clk_ctl_st register */ +#define CCS_FORCEALP 0x00000001 /* force ALP request */ +#define CCS_FORCEHT 0x00000002 /* force HT request */ +#define CCS_FORCEILP 0x00000004 /* force ILP request */ +#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ +#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ +#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ +#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ +#define CCS_HTAVAIL 0x00020000 /* HT is available */ +#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ +#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ + +/* Not really related to SOC Interconnect, but a couple of software + * conventions for the use the flash space: + */ + +/* Minumum amount of flash we support */ +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +/* A boot/binary may have an embedded block that describes its size */ +#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ +#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ +#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ +#define BISZ_TXTST_IDX 1 /* 1: text start */ +#define BISZ_TXTEND_IDX 2 /* 2: text end */ +#define BISZ_DATAST_IDX 3 /* 3: data start */ +#define BISZ_DATAEND_IDX 4 /* 4: data end */ +#define BISZ_BSSST_IDX 5 /* 5: bss start */ +#define BISZ_BSSEND_IDX 6 /* 6: bss end */ +#define BISZ_SIZE 7 /* descriptor size in 32-bit intergers */ + +#endif /* _HNDSOC_H */ diff --git a/drivers/net/wireless/bcm4329/include/linux_osl.h b/drivers/net/wireless/bcm4329/include/linux_osl.h new file mode 100644 index 000000000000..b059c2adb17d --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/linux_osl.h @@ -0,0 +1,322 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
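
Editor's note: since the BISZ descriptor above is just seven 32-bit words at a fixed offset, a host-side check might look like the hedged sketch below; the image buffer and helper are hypothetical.

/* Hypothetical check: locate the embedded BISZ block in a downloaded image
 * and report the text segment bounds it advertises. */
#include <typedefs.h>
#include <hndsoc.h>

static bool bisz_text_bounds(const uint32 *image, uint32 *text_start, uint32 *text_end)
{
	const uint32 *bisz = image + BISZ_OFFSET / sizeof(uint32);

	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return FALSE;                 /* image carries no descriptor */
	*text_start = bisz[BISZ_TXTST_IDX];
	*text_end = bisz[BISZ_TXTEND_IDX];
	return TRUE;
}
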
+ * + * $Id: linux_osl.h,v 13.131.30.8 2010/04/26 05:42:18 Exp $ + */ + + +#ifndef _linux_osl_h_ +#define _linux_osl_h_ + +#include <typedefs.h> + + +#include <linuxver.h> + + +#ifdef __GNUC__ +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#if GCC_VERSION > 30100 +#define ASSERT(exp) do {} while (0) +#else + +#define ASSERT(exp) +#endif +#endif + + +#define OSL_DELAY(usec) osl_delay(usec) +extern void osl_delay(uint usec); + + + +#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ + osl_pcmcia_read_attr((osh), (offset), (buf), (size)) +#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ + osl_pcmcia_write_attr((osh), (offset), (buf), (size)) +extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); +extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); + + +#define OSL_PCI_READ_CONFIG(osh, offset, size) \ + osl_pci_read_config((osh), (offset), (size)) +#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ + osl_pci_write_config((osh), (offset), (size), (val)) +extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); +extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); + + +#define OSL_PCI_BUS(osh) osl_pci_bus(osh) +#define OSL_PCI_SLOT(osh) osl_pci_slot(osh) +extern uint osl_pci_bus(osl_t *osh); +extern uint osl_pci_slot(osl_t *osh); + + +typedef struct { + bool pkttag; + uint pktalloced; + bool mmbus; + pktfree_cb_fn_t tx_fn; + void *tx_ctx; +} osl_pubinfo_t; + + +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); +extern void osl_detach(osl_t *osh); + +#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ + ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ + } while (0) + + +#define BUS_SWAP32(v) (v) + + +#define MALLOC(osh, size) osl_malloc((osh), (size)) +#define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) +#define MALLOCED(osh) osl_malloced((osh)) + + +#define MALLOC_FAILED(osh) osl_malloc_failed((osh)) + +extern void *osl_malloc(osl_t *osh, uint size); +extern void osl_mfree(osl_t *osh, void *addr, uint size); +extern uint osl_malloced(osl_t *osh); +extern uint osl_malloc_failed(osl_t *osh); + + +#define DMA_CONSISTENT_ALIGN PAGE_SIZE +#define DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \ + osl_dma_alloc_consistent((osh), (size), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa); + + +#define DMA_TX 1 +#define DMA_RX 2 + + +#define DMA_MAP(osh, va, size, direction, p, dmah) \ + osl_dma_map((osh), (va), (size), (direction)) +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction); +extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); + + +#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0) + + +#include <bcmsdh.h> +#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v))) +#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r)))) + +#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ + mmap_op else bus_op +#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? 
\ + mmap_op : bus_op + + + + +#ifndef printf +#define printf(fmt, args...) printk(fmt, ## args) +#endif +#include <linux/kernel.h> +#include <linux/string.h> + + +#ifndef IL_BIGENDIAN +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \ + sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \ + readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \ +) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#else +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)((uintptr)(r)^3)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)((uintptr)(r)^2)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), \ + (volatile uint8*)((uintptr)(r)^3)); break; \ + case sizeof(uint16): writew((uint16)(v), \ + (volatile uint16*)((uintptr)(r)^2)); break; \ + case sizeof(uint32): writel((uint32)(v), \ + (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif + +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) + + +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + + +#define OSL_UNCACHED(va) ((void*)va) + + +#if defined(__i386__) +#define OSL_GETCYCLES(x) rdtscl((x)) +#else +#define OSL_GETCYCLES(x) ((x) = 0) +#endif + + +#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) + + +#if !defined(CONFIG_MMC_MSM7X00A) +#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size)) +#else +#define REG_MAP(pa, size) (void *)(0) +#endif +#define REG_UNMAP(va) iounmap((va)) + + +#define R_SM(r) *(r) +#define W_SM(r, v) (*(r) = (v)) +#define BZERO_SM(r, len) memset((r), '\0', (len)) + + +#define PKTGET(osh, len, send) osl_pktget((osh), (len)) +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) +#ifdef DHD_USE_STATIC_BUF +#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) +#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) +#endif +#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data) +#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len) +#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) +#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail)) +#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next) +#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)) +#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len)) +#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes)) +#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#define PKTTAG(skb) 
((void*)(((struct sk_buff*)(skb))->cb)) +#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced +#define PKTSETPOOL(osh, skb, x, y) do {} while (0) +#define PKTPOOL(osh, skb) FALSE +#define PKTPOOLLEN(osh, pktp) (0) +#define PKTPOOLAVAIL(osh, pktp) (0) +#define PKTPOOLADD(osh, pktp, p) BCME_ERROR +#define PKTPOOLGET(osh, pktp) NULL +#define PKTLIST_DUMP(osh, buf) + +extern void *osl_pktget(osl_t *osh, uint len); +extern void osl_pktfree(osl_t *osh, void *skb, bool send); +extern void *osl_pktget_static(osl_t *osh, uint len); +extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); +extern void *osl_pktdup(osl_t *osh, void *skb); + + + +static INLINE void * +osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb) +{ + struct sk_buff *nskb; + + if (osh->pkttag) + bzero((void*)skb->cb, OSL_PKTTAG_SZ); + + + for (nskb = skb; nskb; nskb = nskb->next) { + osh->pktalloced++; + } + + return (void *)skb; +} +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb)) + + +static INLINE struct sk_buff * +osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt) +{ + struct sk_buff *nskb; + + if (osh->pkttag) + bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ); + + + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + osh->pktalloced--; + } + + return (struct sk_buff *)pkt; +} +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) + +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + + +#define OSL_ERROR(bcmerror) osl_error(bcmerror) +extern int osl_error(int bcmerror); + + +#define PKTBUFSZ 2048 + + +#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) +#endif diff --git a/drivers/net/wireless/bcm4329/include/linuxver.h b/drivers/net/wireless/bcm4329/include/linuxver.h new file mode 100644 index 000000000000..6ed22658a72b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/linuxver.h @@ -0,0 +1,447 @@ +/* + * Linux-specific abstractions to gain some independence from linux kernel versions. + * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
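
Editor's note: a hedged sketch of the packet-macro idiom that the linux_osl.h definitions above support. It assumes a valid osl_t obtained from osl_attach() and that bcmutils.h supplies BCME_NOMEM; the function itself is hypothetical.

/* Hypothetical tx helper: allocate, fill and release an OSL packet. */
#include <linux/string.h>
#include <typedefs.h>
#include <bcmutils.h>
#include <osl.h>

static int demo_tx(osl_t *osh)
{
	void *p = PKTGET(osh, 64, TRUE);       /* 64-byte transmit buffer */

	if (p == NULL)
		return OSL_ERROR(BCME_NOMEM);      /* translate to a Linux errno */
	memset(PKTDATA(osh, p), 0, PKTLEN(osh, p));
	/* ... normally handed to the bus layer instead of freed here ... */
	PKTFREE(osh, p, TRUE);
	return 0;
}
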
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: linuxver.h,v 13.38.8.1.8.6 2010/04/29 05:00:46 Exp $ + */ + + +#ifndef _linuxver_h_ +#define _linuxver_h_ + +#include <linux/version.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#include <linux/config.h> +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#include <linux/autoconf.h> +#endif +#include <linux/module.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) + +#ifdef __UNDEF_NO_VERSION__ +#undef __NO_VERSION__ +#else +#define __NO_VERSION__ +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") +#define module_param_string(_name_, _string_, _size_, _perm_) \ + MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include <linux/malloc.h> +#else +#include <linux/slab.h> +#endif + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/semaphore.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif +#include <asm/io.h> + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include <linux/workqueue.h> +#else +#include <linux/tqueue.h> +#ifndef work_struct +#define work_struct tq_struct +#endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func, _data) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func, _data) INIT_WORK(_work, _func, _data) +typedef void (*work_func_t)(void *work); +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#define CONFIG_WIRELESS_EXT +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#ifndef SANDGATE2G +#define MOD_INC_USE_COUNT +#endif +#endif + + +#ifndef __exit +#define __exit +#endif +#ifndef __devexit +#define __devexit +#endif +#ifndef __devinit +#define __devinit __init +#endif +#ifndef __devinitdata +#define __devinitdata +#endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) + + + +struct pci_device_id { + unsigned int vendor, device; + unsigned int subvendor, subdevice; + unsigned int class, class_mask; + unsigned long driver_data; +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *dev, + const struct pci_device_id *id); + void (*remove)(struct pci_dev *dev); + void 
(*suspend)(struct pci_dev *dev); + void (*resume)(struct pci_dev *dev); +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + + +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + + + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif + +typedef u32 dma_addr_t; + + +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + + +#ifndef _COMPAT_NETDEVICE_H + + + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + + +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); 
} + + +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + + +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + + +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + + + +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + + else { + for (i = 0; i < 6; i ++) + pci_write_config_dword(dev, + PCI_BASE_ADDRESS_0 + (i * 4), + pci_resource_start(dev, i)); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } + return 0; +} + +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) +#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#else +#define OLD_MOD_INC_USE_COUNT do {} while (0) +#define OLD_MOD_DEC_USE_COUNT do {} while (0) +#endif +#else +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#endif +#ifndef MOD_INC_USE_COUNT +#define MOD_INC_USE_COUNT do {} while (0) +#endif +#ifndef MOD_DEC_USE_COUNT +#define MOD_DEC_USE_COUNT do {} while (0) +#endif +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) do {} while (0) +#endif + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(dev) kfree(dev) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + +#define af_packet_priv data +#endif + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) +#define DRV_SUSPEND_STATE_TYPE pm_message_t +#else +#define DRV_SUSPEND_STATE_TYPE uint32 +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define CHECKSUM_HW CHECKSUM_PARTIAL +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#define KILL_PROC(pid, sig) \ +{ \ + struct task_struct *tsk; \ + tsk = pid_task(find_vpid(pid), PIDTYPE_PID); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_priv(dev) dev->priv +#endif + +#endif diff --git a/drivers/net/wireless/bcm4329/include/miniopt.h b/drivers/net/wireless/bcm4329/include/miniopt.h new file mode 100644 index 000000000000..3667fb1e215b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/miniopt.h @@ -0,0 +1,77 @@ +/* + * Command line options parser. 
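
Editor's note: as a hedged example of the work-queue compatibility layer above, one call site can target both old and new kernels. The container struct and handler below are hypothetical.

/* Hypothetical driver state with one deferred work item. */
#include <linuxver.h>

struct demo_state {
	struct work_struct work;
};

static void demo_work_fn(void *data)
{
	/* On kernels new enough to hit the 2.6.19 branch above, the argument is
	 * really the work_struct pointer; the (work_func_t) cast below papers
	 * over both prototypes. */
}

static void demo_schedule(struct demo_state *ds)
{
	MY_INIT_WORK(&ds->work, (work_func_t)demo_work_fn, ds);
	schedule_work(&ds->work);
}
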
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: miniopt.h,v 1.1.6.2 2009/01/14 23:52:48 Exp $ + */ + + +#ifndef MINI_OPT_H +#define MINI_OPT_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Include Files ---------------------------------------------------- */ +/* ---- Constants and Types ---------------------------------------------- */ + +#define MINIOPT_MAXKEY 128 /* Max options */ +typedef struct miniopt { + + /* These are persistent after miniopt_init() */ + const char* name; /* name for prompt in error strings */ + const char* flags; /* option chars that take no args */ + bool longflags; /* long options may be flags */ + bool opt_end; /* at end of options (passed a "--") */ + + /* These are per-call to miniopt() */ + + int consumed; /* number of argv entries cosumed in + * the most recent call to miniopt() + */ + bool positional; + bool good_int; /* 'val' member is the result of a sucessful + * strtol conversion of the option value + */ + char opt; + char key[MINIOPT_MAXKEY]; + char* valstr; /* positional param, or value for the option, + * or null if the option had + * no accompanying value + */ + uint uval; /* strtol translation of valstr */ + int val; /* strtol translation of valstr */ +} miniopt_t; + +void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags); +int miniopt(miniopt_t *t, char **argv); + + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + + +#ifdef __cplusplus + } +#endif + +#endif /* MINI_OPT_H */ diff --git a/drivers/net/wireless/bcm4329/include/msgtrace.h b/drivers/net/wireless/bcm4329/include/msgtrace.h new file mode 100644 index 000000000000..1479086dba3e --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/msgtrace.h @@ -0,0 +1,72 @@ +/* + * Trace messages sent over HBUS + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the 
resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: msgtrace.h,v 1.1.2.4 2009/01/27 04:09:40 Exp $ + */ + +#ifndef _MSGTRACE_H +#define _MSGTRACE_H + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +#define MSGTRACE_VERSION 1 + +/* Message trace header */ +typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr { + uint8 version; + uint8 spare; + uint16 len; /* Len of the trace */ + uint32 seqnum; /* Sequence number of message. Useful if the messsage has been lost + * because of DMA error or a bus reset (ex: SDIO Func2) + */ + uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */ + uint32 discarded_printf; /* Number of discarded printf because of trace overflow */ +} BWL_POST_PACKED_STRUCT msgtrace_hdr_t; + +#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t) + +/* The hbus driver generates traces when sending a trace message. This causes endless traces. + * This flag must be set to TRUE in any hbus traces. The flag is reset in the function msgtrace_put. + * This prevents endless traces but generates hasardous lost of traces only in bus device code. + * It is recommendat to set this flag in macro SD_TRACE but not in SD_ERROR for avoiding missing + * hbus error traces. hbus error trace should not generates endless traces. + */ +extern bool msgtrace_hbus_trace; + +typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr, + uint16 hdrlen, uint8 *buf, uint16 buflen); + +extern void msgtrace_sent(void); +extern void msgtrace_put(char *buf, int count); +extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send); + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _MSGTRACE_H */ diff --git a/drivers/net/wireless/bcm4329/include/osl.h b/drivers/net/wireless/bcm4329/include/osl.h new file mode 100644 index 000000000000..5599e536eeea --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/osl.h @@ -0,0 +1,55 @@ +/* + * OS Abstraction Layer + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
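[Editorial note] msgtrace.h above defines only the fixed header that precedes each block of firmware trace text; seqnum and the discarded_* counters are what a host-side reader uses to notice dropped output. A rough sketch of a consumer-side check, with byte-order handling left out because it depends on the bus glue:

    #include <typedefs.h>
    #include <msgtrace.h>

    /* Returns the length of the trace text, or -1 if the header is unusable.
     * "*last_seq" carries the previously seen sequence number. */
    static int example_msgtrace_payload(const uint8 *buf, uint16 buflen,
                                        uint32 *last_seq)
    {
            const msgtrace_hdr_t *hdr = (const msgtrace_hdr_t *)buf;

            if (buflen < MSGTRACE_HDRLEN || hdr->version != MSGTRACE_VERSION)
                    return -1;

            /* A jump in seqnum (or nonzero discarded_* counters) means trace
             * data was lost, e.g. to a DMA error or an SDIO func2 reset. */
            *last_seq = hdr->seqnum;

            return (int)(buflen - MSGTRACE_HDRLEN);
    }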
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: osl.h,v 13.37.32.1 2008/11/20 00:51:15 Exp $ + */ + + +#ifndef _osl_h_ +#define _osl_h_ + + +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; + +#define OSL_PKTTAG_SZ 32 + + +typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status); + +#include <linux_osl.h> + + + + +#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val))) + +#ifndef AND_REG +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#endif + +#ifndef OR_REG +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) +#endif + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/packed_section_end.h b/drivers/net/wireless/bcm4329/include/packed_section_end.h new file mode 100644 index 000000000000..5b61c18fcd08 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/packed_section_end.h @@ -0,0 +1,54 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be delared as: + * + * #include <packed_section_start.h> + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include <packed_section_end.h> + * + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: packed_section_end.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $ + */ + + + + +#ifdef BWL_PACKED_SECTION + #undef BWL_PACKED_SECTION +#else + #error "BWL_PACKED_SECTION is NOT defined!" +#endif + + + + + +#undef BWL_PRE_PACKED_STRUCT +#undef BWL_POST_PACKED_STRUCT diff --git a/drivers/net/wireless/bcm4329/include/packed_section_start.h b/drivers/net/wireless/bcm4329/include/packed_section_start.h new file mode 100644 index 000000000000..cb93aa64079a --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/packed_section_start.h @@ -0,0 +1,61 @@ +/* + * Declare directives for structure packing. 
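[Editorial note] The SET_REG/AND_REG/OR_REG helpers in osl.h above are thin read-modify-write wrappers over R_REG()/W_REG(), which come from linux_osl.h and are not part of this hunk; the sketch below only illustrates the intended pattern, with a made-up interrupt-mask register:

    #include <typedefs.h>
    #include <osl.h>

    /* Illustrative only: "intmask" is a hypothetical chip register. */
    static void example_update_intmask(osl_t *osh, volatile uint32 *intmask,
                                       uint32 set_bits, uint32 clr_bits)
    {
            OR_REG(osh, intmask, set_bits);         /* reg |= set_bits  */
            AND_REG(osh, intmask, ~clr_bits);       /* reg &= ~clr_bits */

            /* replace one field in place: reg = (reg & ~mask) | val */
            SET_REG(osh, intmask, 0x0000ffffu, set_bits & 0xffffu);
    }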
No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be delared as: + * + * #include <packed_section_start.h> + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include <packed_section_end.h> + * + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: packed_section_start.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $ + */ + + + + +#ifdef BWL_PACKED_SECTION + #error "BWL_PACKED_SECTION is already defined!" +#else + #define BWL_PACKED_SECTION +#endif + + + + + +#if defined(__GNUC__) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT __attribute__((packed)) +#elif defined(__CC_ARM) + #define BWL_PRE_PACKED_STRUCT __packed + #define BWL_POST_PACKED_STRUCT +#else + #error "Unknown compiler!" +#endif diff --git a/drivers/net/wireless/bcm4329/include/pcicfg.h b/drivers/net/wireless/bcm4329/include/pcicfg.h new file mode 100644 index 000000000000..898962c942a8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/pcicfg.h @@ -0,0 +1,52 @@ +/* + * pcicfg.h: PCI configuration constants and structures. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
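[Editorial note] Because the whole point of the BWL_PRE_PACKED_STRUCT/BWL_POST_PACKED_STRUCT pair above is to pin a structure to its on-the-wire layout, a compile-time size check is a cheap guard against a toolchain that quietly ignores the packing attribute. The structure and expected size below are invented purely for illustration:

    #include <typedefs.h>

    #include <packed_section_start.h>
    typedef BWL_PRE_PACKED_STRUCT struct example_wire {
            uint8  type;
            uint16 len;     /* would land at offset 2, not 1, without packing */
            uint32 cookie;
    } BWL_POST_PACKED_STRUCT example_wire_t;
    #include <packed_section_end.h>

    /* Refuses to compile (negative array size) if padding crept back in. */
    typedef char example_wire_size_check[(sizeof(example_wire_t) == 7) ? 1 : -1];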
+ * + * $Id: pcicfg.h,v 1.41.12.3 2008/06/26 22:49:41 Exp $ + */ + + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + + +#define PCI_CFG_VID 0 +#define PCI_CFG_CMD 4 +#define PCI_CFG_REV 8 +#define PCI_CFG_BAR0 0x10 +#define PCI_CFG_BAR1 0x14 +#define PCI_BAR0_WIN 0x80 +#define PCI_INT_STATUS 0x90 +#define PCI_INT_MASK 0x94 + +#define PCIE_EXTCFG_OFFSET 0x100 +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) + +#define PCI_BAR0_WINSZ (16 * 1024) + + +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) +#define PCI_16KBB0_WINSZ (16 * 1024) + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11.h b/drivers/net/wireless/bcm4329/include/proto/802.11.h new file mode 100644 index 000000000000..fd26317361da --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/802.11.h @@ -0,0 +1,1433 @@ +/* + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
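[Editorial note] PCI_BAR0_WIN above is the configuration-space register Broadcom parts use to slide a window of the internal backplane behind BAR0, and PCI_BAR0_WINSZ is that window's size; the *_OFFSET values locate fixed blocks inside it. The sketch below shows the usual way a host driver would aim the window before touching a core's registers; treat it as an assumption about intent rather than code taken from this driver:

    #include <linux/pci.h>
    #include <pcicfg.h>

    /* Point the 16 KB BAR0 window at the backplane region holding core_addr. */
    static void example_select_core(struct pci_dev *pdev, u32 core_addr)
    {
            u32 win_base = core_addr & ~(u32)(PCI_BAR0_WINSZ - 1);

            pci_write_config_dword(pdev, PCI_BAR0_WIN, win_base);
            /* core registers are now reachable at
             * pci_resource_start(pdev, 0) + (core_addr - win_base) */
    }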
+ * + * Fundamental types and constants relating to 802.11 + * + * $Id: 802.11.h,v 9.219.4.1.4.5.6.11 2010/02/09 13:23:26 Exp $ + */ + + +#ifndef _802_11_H_ +#define _802_11_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +#ifndef _NET_ETHERNET_H_ +#include <proto/ethernet.h> +#endif + +#include <proto/wpa.h> + + +#include <packed_section_start.h> + + +#define DOT11_TU_TO_US 1024 + + +#define DOT11_A3_HDR_LEN 24 +#define DOT11_A4_HDR_LEN 30 +#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN +#define DOT11_FCS_LEN 4 +#define DOT11_ICV_LEN 4 +#define DOT11_ICV_AES_LEN 8 +#define DOT11_QOS_LEN 2 +#define DOT11_HTC_LEN 4 + +#define DOT11_KEY_INDEX_SHIFT 6 +#define DOT11_IV_LEN 4 +#define DOT11_IV_TKIP_LEN 8 +#define DOT11_IV_AES_OCB_LEN 4 +#define DOT11_IV_AES_CCM_LEN 8 +#define DOT11_IV_MAX_LEN 8 + + +#define DOT11_MAX_MPDU_BODY_LEN 2304 + +#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ + DOT11_QOS_LEN + \ + DOT11_IV_AES_CCM_LEN + \ + DOT11_MAX_MPDU_BODY_LEN + \ + DOT11_ICV_LEN + \ + DOT11_FCS_LEN) + +#define DOT11_MAX_SSID_LEN 32 + + +#define DOT11_DEFAULT_RTS_LEN 2347 +#define DOT11_MAX_RTS_LEN 2347 + + +#define DOT11_MIN_FRAG_LEN 256 +#define DOT11_MAX_FRAG_LEN 2346 +#define DOT11_DEFAULT_FRAG_LEN 2346 + + +#define DOT11_MIN_BEACON_PERIOD 1 +#define DOT11_MAX_BEACON_PERIOD 0xFFFF + + +#define DOT11_MIN_DTIM_PERIOD 1 +#define DOT11_MAX_DTIM_PERIOD 0xFF + + +#define DOT11_LLC_SNAP_HDR_LEN 8 +#define DOT11_OUI_LEN 3 +BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { + uint8 dsap; + uint8 ssap; + uint8 ctl; + uint8 oui[DOT11_OUI_LEN]; + uint16 type; +} BWL_POST_PACKED_STRUCT; + + +#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) + + + +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; + uint16 durid; + struct ether_addr a1; + struct ether_addr a2; + struct ether_addr a3; + uint16 seq; + struct ether_addr a4; +} BWL_POST_PACKED_STRUCT; + + + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; + uint16 durid; + struct ether_addr bssid; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 + +BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; + struct dot11_action_wifi_vendor_specific* next_node; +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 +#define DOT11_BA_CTL_POLICY_MASK 0x0001 + +#define DOT11_BA_CTL_MTID 0x0002 +#define DOT11_BA_CTL_COMPRESSED 0x0004 + +#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 +#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 + +#define DOT11_BA_CTL_TID_MASK 0xF000 +#define DOT11_BA_CTL_TID_SHIFT 12 + + +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; + uint16 durid; + struct 
ether_addr ra; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; + uint16 seqnum; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 + +#define DOT11_BA_BITMAP_LEN 128 +#define DOT11_BA_CMP_BITMAP_LEN 8 + +BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; + uint16 seqnum; + uint8 bitmap[DOT11_BA_BITMAP_LEN]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 + + +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; + uint16 durid; + struct ether_addr da; + struct ether_addr sa; + struct ether_addr bssid; + uint16 seq; +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_HDR_LEN 24 + + + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; + uint16 seq; + uint16 status; +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; + uint16 listen; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; + uint16 listen; + struct ether_addr ap; +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; + uint16 status; + uint16 aid; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + + +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + uint8 min; + uint8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + + +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; + uint8 len; + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 +#define DOT11_EXTCH_IE_LEN 1 +#define DOT11_EXT_CH_MASK 0x03 +#define DOT11_EXT_CH_UPPER 0x01 +#define DOT11_EXT_CH_LOWER 0x03 +#define DOT11_EXT_CH_NONE 0x00 + +BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { + uint8 category; + uint8 action; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define 
DOT11_ACTION_FRMHDR_LEN 2 + + +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { + uint8 id; + uint8 len; + uint8 mode; + uint8 channel; + uint8 count; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch dot11_chan_switch_ie_t; + +#define DOT11_SWITCH_IE_LEN 3 + +#define DOT11_CSA_MODE_ADVISORY 0 +#define DOT11_CSA_MODE_NO_TX 1 + +BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { + uint8 category; + uint8 action; + dot11_chan_switch_ie_t chan_switch_ie; + dot11_brcm_extch_ie_t extch_ie; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_csa_body { + uint8 mode; + uint8 reg; + uint8 channel; + uint8 count; +} BWL_POST_PACKED_STRUCT; + + +BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { + uint8 id; + uint8 len; + struct dot11_csa_body b; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { + uint8 category; + uint8 action; + struct dot11_csa_body b; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_csa dot11_ext_csa_ie_t; +#define DOT11_EXT_CSA_IE_LEN 4 + +BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { + uint8 category; + uint8 action; + dot11_ext_csa_ie_t chan_switch_ie; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { + uint8 id; + uint8 len; + uint8 info; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_coex dot11_obss_coex_t; +#define DOT11_OBSS_COEXINFO_LEN 1 + +#define DOT11_OBSS_COEX_INFO_REQ 0x01 +#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 +#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { + uint8 id; + uint8 len; + uint8 regclass; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; +#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 + +BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { + uint8 id; + uint8 len; + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap_ie dot11_extcap_ie_t; +#define DOT11_EXTCAP_LEN 1 + + + +#define DOT11_MEASURE_TYPE_BASIC 0 +#define DOT11_MEASURE_TYPE_CCA 1 +#define DOT11_MEASURE_TYPE_RPI 2 + + +#define DOT11_MEASURE_MODE_ENABLE (1<<1) +#define DOT11_MEASURE_MODE_REQUEST (1<<2) +#define DOT11_MEASURE_MODE_REPORT (1<<3) + +#define DOT11_MEASURE_MODE_LATE (1<<0) +#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) +#define DOT11_MEASURE_MODE_REFUSED (1<<2) + +#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) +#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) +#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) +#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) +#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 channel; + uint8 start_time[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req dot11_meas_req_t; +#define DOT11_MNG_IE_MREQ_LEN 14 + +#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; + } BWL_POST_PACKED_STRUCT basic; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT rep; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep dot11_meas_rep_t; + + +#define DOT11_MNG_IE_MREP_FIXED_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef 
struct dot11_meas_rep_basic dot11_meas_rep_basic_t; +#define DOT11_MEASURE_BASIC_REP_LEN 12 + +BWL_PRE_PACKED_STRUCT struct dot11_quiet { + uint8 id; + uint8 len; + uint8 count; + uint8 period; + uint16 duration; + uint16 offset; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_quiet dot11_quiet_t; + +BWL_PRE_PACKED_STRUCT struct chan_map_tuple { + uint8 channel; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct chan_map_tuple chan_map_tuple_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { + uint8 id; + uint8 len; + uint8 eaddr[ETHER_ADDR_LEN]; + uint8 interval; + chan_map_tuple_t map[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; + + +#define WME_OUI "\x00\x50\xf2" +#define WME_VER 1 +#define WME_TYPE 2 +#define WME_SUBTYPE_IE 0 +#define WME_SUBTYPE_PARAM_IE 1 +#define WME_SUBTYPE_TSPEC 2 + + +#define AC_BE 0 +#define AC_BK 1 +#define AC_VI 2 +#define AC_VO 3 +#define AC_COUNT 4 + +typedef uint8 ac_bitmap_t; + +#define AC_BITMAP_NONE 0x0 +#define AC_BITMAP_ALL 0xf +#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0) +#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) +#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) + + +BWL_PRE_PACKED_STRUCT struct wme_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_ie wme_ie_t; +#define WME_IE_LEN 7 + +BWL_PRE_PACKED_STRUCT struct edcf_acparam { + uint8 ACI; + uint8 ECW; + uint16 TXOP; +} BWL_POST_PACKED_STRUCT; +typedef struct edcf_acparam edcf_acparam_t; + + +BWL_PRE_PACKED_STRUCT struct wme_param_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_param_ie wme_param_ie_t; +#define WME_PARAM_IE_LEN 24 + + +#define WME_QI_AP_APSD_MASK 0x80 +#define WME_QI_AP_APSD_SHIFT 7 +#define WME_QI_AP_COUNT_MASK 0x0f +#define WME_QI_AP_COUNT_SHIFT 0 + + +#define WME_QI_STA_MAXSPLEN_MASK 0x60 +#define WME_QI_STA_MAXSPLEN_SHIFT 5 +#define WME_QI_STA_APSD_ALL_MASK 0xf +#define WME_QI_STA_APSD_ALL_SHIFT 0 +#define WME_QI_STA_APSD_BE_MASK 0x8 +#define WME_QI_STA_APSD_BE_SHIFT 3 +#define WME_QI_STA_APSD_BK_MASK 0x4 +#define WME_QI_STA_APSD_BK_SHIFT 2 +#define WME_QI_STA_APSD_VI_MASK 0x2 +#define WME_QI_STA_APSD_VI_SHIFT 1 +#define WME_QI_STA_APSD_VO_MASK 0x1 +#define WME_QI_STA_APSD_VO_SHIFT 0 + + +#define EDCF_AIFSN_MIN 1 +#define EDCF_AIFSN_MAX 15 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_ACM_MASK 0x10 +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_AIFSN_SHIFT 12 + + +#define EDCF_ECW_MIN 0 +#define EDCF_ECW_MAX 15 +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_MASK 0xf0 +#define EDCF_ECWMAX_SHIFT 4 + + +#define EDCF_TXOP_MIN 0 +#define EDCF_TXOP_MAX 65535 +#define EDCF_TXOP2USEC(txop) ((txop) << 5) + + +#define NON_EDCF_AC_BE_ACI_STA 0x02 + + +#define EDCF_AC_BE_ACI_STA 0x03 +#define EDCF_AC_BE_ECW_STA 0xA4 +#define EDCF_AC_BE_TXOP_STA 0x0000 +#define EDCF_AC_BK_ACI_STA 0x27 +#define EDCF_AC_BK_ECW_STA 0xA4 +#define EDCF_AC_BK_TXOP_STA 0x0000 +#define EDCF_AC_VI_ACI_STA 0x42 +#define EDCF_AC_VI_ECW_STA 0x43 +#define EDCF_AC_VI_TXOP_STA 0x005e +#define EDCF_AC_VO_ACI_STA 0x62 +#define EDCF_AC_VO_ECW_STA 0x32 +#define EDCF_AC_VO_TXOP_STA 0x002f + + +#define EDCF_AC_BE_ACI_AP 0x03 +#define EDCF_AC_BE_ECW_AP 0x64 +#define EDCF_AC_BE_TXOP_AP 0x0000 +#define EDCF_AC_BK_ACI_AP 0x27 +#define EDCF_AC_BK_ECW_AP 0xA4 +#define 
EDCF_AC_BK_TXOP_AP 0x0000 +#define EDCF_AC_VI_ACI_AP 0x41 +#define EDCF_AC_VI_ECW_AP 0x43 +#define EDCF_AC_VI_TXOP_AP 0x005e +#define EDCF_AC_VO_ACI_AP 0x61 +#define EDCF_AC_VO_ECW_AP 0x32 +#define EDCF_AC_VO_TXOP_AP 0x002f + + +BWL_PRE_PACKED_STRUCT struct edca_param_ie { + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct edca_param_ie edca_param_ie_t; +#define EDCA_PARAM_IE_LEN 18 + + +BWL_PRE_PACKED_STRUCT struct qos_cap_ie { + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct qos_cap_ie qos_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie { + uint8 id; + uint8 length; + uint16 station_count; + uint8 channel_utilization; + uint16 aac; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; + + +#define FIXED_MSDU_SIZE 0x8000 +#define MSDU_SIZE_MASK 0x7fff + + + +#define INTEGER_SHIFT 13 +#define FRACTION_MASK 0x1FFF + + +BWL_PRE_PACKED_STRUCT struct dot11_management_notification { + uint8 category; + uint8 action; + uint8 token; + uint8 status; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_NOTIFICATION_LEN 4 + + +#define WME_ADDTS_REQUEST 0 +#define WME_ADDTS_RESPONSE 1 +#define WME_DELTS_REQUEST 2 + + +#define WME_ADMISSION_ACCEPTED 0 +#define WME_INVALID_PARAMETERS 1 +#define WME_ADMISSION_REFUSED 3 + + +#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) + + +#define DOT11_OPEN_SYSTEM 0 +#define DOT11_SHARED_KEY 1 + +#define DOT11_OPEN_SHARED 2 +#define DOT11_CHALLENGE_LEN 128 + + +#define FC_PVER_MASK 0x3 +#define FC_PVER_SHIFT 0 +#define FC_TYPE_MASK 0xC +#define FC_TYPE_SHIFT 2 +#define FC_SUBTYPE_MASK 0xF0 +#define FC_SUBTYPE_SHIFT 4 +#define FC_TODS 0x100 +#define FC_TODS_SHIFT 8 +#define FC_FROMDS 0x200 +#define FC_FROMDS_SHIFT 9 +#define FC_MOREFRAG 0x400 +#define FC_MOREFRAG_SHIFT 10 +#define FC_RETRY 0x800 +#define FC_RETRY_SHIFT 11 +#define FC_PM 0x1000 +#define FC_PM_SHIFT 12 +#define FC_MOREDATA 0x2000 +#define FC_MOREDATA_SHIFT 13 +#define FC_WEP 0x4000 +#define FC_WEP_SHIFT 14 +#define FC_ORDER 0x8000 +#define FC_ORDER_SHIFT 15 + + +#define SEQNUM_SHIFT 4 +#define SEQNUM_MAX 0x1000 +#define FRAGNUM_MASK 0xF + + + + +#define FC_TYPE_MNG 0 +#define FC_TYPE_CTL 1 +#define FC_TYPE_DATA 2 + + +#define FC_SUBTYPE_ASSOC_REQ 0 +#define FC_SUBTYPE_ASSOC_RESP 1 +#define FC_SUBTYPE_REASSOC_REQ 2 +#define FC_SUBTYPE_REASSOC_RESP 3 +#define FC_SUBTYPE_PROBE_REQ 4 +#define FC_SUBTYPE_PROBE_RESP 5 +#define FC_SUBTYPE_BEACON 8 +#define FC_SUBTYPE_ATIM 9 +#define FC_SUBTYPE_DISASSOC 10 +#define FC_SUBTYPE_AUTH 11 +#define FC_SUBTYPE_DEAUTH 12 +#define FC_SUBTYPE_ACTION 13 +#define FC_SUBTYPE_ACTION_NOACK 14 + + +#define FC_SUBTYPE_CTL_WRAPPER 7 +#define FC_SUBTYPE_BLOCKACK_REQ 8 +#define FC_SUBTYPE_BLOCKACK 9 +#define FC_SUBTYPE_PS_POLL 10 +#define FC_SUBTYPE_RTS 11 +#define FC_SUBTYPE_CTS 12 +#define FC_SUBTYPE_ACK 13 +#define FC_SUBTYPE_CF_END 14 +#define FC_SUBTYPE_CF_END_ACK 15 + + +#define FC_SUBTYPE_DATA 0 +#define FC_SUBTYPE_DATA_CF_ACK 1 +#define FC_SUBTYPE_DATA_CF_POLL 2 +#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 +#define FC_SUBTYPE_NULL 4 +#define FC_SUBTYPE_CF_ACK 5 +#define FC_SUBTYPE_CF_POLL 6 +#define FC_SUBTYPE_CF_ACK_POLL 7 +#define FC_SUBTYPE_QOS_DATA 8 +#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 +#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 +#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 +#define FC_SUBTYPE_QOS_NULL 12 +#define FC_SUBTYPE_QOS_CF_POLL 14 +#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 + + +#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) +#define 
FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) +#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) +#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) + + +#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) + +#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) + +#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) +#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) + +#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) +#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) +#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) +#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) +#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) +#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) +#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) +#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) +#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) +#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) +#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) +#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) + +#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) +#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) +#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) +#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) +#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) +#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) +#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) +#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) +#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) + +#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) +#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) +#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) +#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) +#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) + + + + +#define QOS_PRIO_SHIFT 0 +#define QOS_PRIO_MASK 0x0007 +#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) + + +#define QOS_TID_SHIFT 0 +#define QOS_TID_MASK 0x000f +#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) + + +#define QOS_EOSP_SHIFT 4 +#define QOS_EOSP_MASK 0x0010 +#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) + + +#define QOS_ACK_NORMAL_ACK 0 +#define QOS_ACK_NO_ACK 1 +#define QOS_ACK_NO_EXP_ACK 2 +#define QOS_ACK_BLOCK_ACK 3 +#define QOS_ACK_SHIFT 5 +#define QOS_ACK_MASK 0x0060 +#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) + + +#define QOS_AMSDU_SHIFT 7 +#define QOS_AMSDU_MASK 0x0080 + + + + + + +#define DOT11_MNG_AUTH_ALGO_LEN 2 +#define DOT11_MNG_AUTH_SEQ_LEN 2 +#define DOT11_MNG_BEACON_INT_LEN 2 +#define DOT11_MNG_CAP_LEN 2 +#define DOT11_MNG_AP_ADDR_LEN 6 +#define DOT11_MNG_LISTEN_INT_LEN 2 +#define DOT11_MNG_REASON_LEN 2 +#define DOT11_MNG_AID_LEN 2 +#define DOT11_MNG_STATUS_LEN 2 +#define DOT11_MNG_TIMESTAMP_LEN 8 + + +#define DOT11_AID_MASK 0x3fff + + +#define DOT11_RC_RESERVED 0 +#define DOT11_RC_UNSPECIFIED 1 +#define DOT11_RC_AUTH_INVAL 2 +#define DOT11_RC_DEAUTH_LEAVING 3 +#define DOT11_RC_INACTIVITY 4 +#define DOT11_RC_BUSY 5 +#define DOT11_RC_INVAL_CLASS_2 6 +#define DOT11_RC_INVAL_CLASS_3 7 +#define DOT11_RC_DISASSOC_LEAVING 8 +#define DOT11_RC_NOT_AUTH 9 +#define DOT11_RC_BAD_PC 10 +#define DOT11_RC_BAD_CHANNELS 11 + + + +#define DOT11_RC_UNSPECIFIED_QOS 32 +#define 
DOT11_RC_INSUFFCIENT_BW 33 +#define DOT11_RC_EXCESSIVE_FRAMES 34 +#define DOT11_RC_TX_OUTSIDE_TXOP 35 +#define DOT11_RC_LEAVING_QBSS 36 +#define DOT11_RC_BAD_MECHANISM 37 +#define DOT11_RC_SETUP_NEEDED 38 +#define DOT11_RC_TIMEOUT 39 + +#define DOT11_RC_MAX 23 + + +#define DOT11_SC_SUCCESS 0 +#define DOT11_SC_FAILURE 1 +#define DOT11_SC_CAP_MISMATCH 10 +#define DOT11_SC_REASSOC_FAIL 11 +#define DOT11_SC_ASSOC_FAIL 12 +#define DOT11_SC_AUTH_MISMATCH 13 +#define DOT11_SC_AUTH_SEQ 14 +#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 +#define DOT11_SC_AUTH_TIMEOUT 16 +#define DOT11_SC_ASSOC_BUSY_FAIL 17 +#define DOT11_SC_ASSOC_RATE_MISMATCH 18 +#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 +#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 +#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 +#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 +#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 +#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 +#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 +#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26 +#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27 + +#define DOT11_SC_DECLINED 37 +#define DOT11_SC_INVALID_PARAMS 38 + + +#define DOT11_MNG_DS_PARAM_LEN 1 +#define DOT11_MNG_IBSS_PARAM_LEN 2 + + +#define DOT11_MNG_TIM_FIXED_LEN 3 +#define DOT11_MNG_TIM_DTIM_COUNT 0 +#define DOT11_MNG_TIM_DTIM_PERIOD 1 +#define DOT11_MNG_TIM_BITMAP_CTL 2 +#define DOT11_MNG_TIM_PVB 3 + + +#define TLV_TAG_OFF 0 +#define TLV_LEN_OFF 1 +#define TLV_HDR_LEN 2 +#define TLV_BODY_OFF 2 + + +#define DOT11_MNG_SSID_ID 0 +#define DOT11_MNG_RATES_ID 1 +#define DOT11_MNG_FH_PARMS_ID 2 +#define DOT11_MNG_DS_PARMS_ID 3 +#define DOT11_MNG_CF_PARMS_ID 4 +#define DOT11_MNG_TIM_ID 5 +#define DOT11_MNG_IBSS_PARMS_ID 6 +#define DOT11_MNG_COUNTRY_ID 7 +#define DOT11_MNG_HOPPING_PARMS_ID 8 +#define DOT11_MNG_HOPPING_TABLE_ID 9 +#define DOT11_MNG_REQUEST_ID 10 +#define DOT11_MNG_QBSS_LOAD_ID 11 +#define DOT11_MNG_EDCA_PARAM_ID 12 +#define DOT11_MNG_CHALLENGE_ID 16 +#define DOT11_MNG_PWR_CONSTRAINT_ID 32 +#define DOT11_MNG_PWR_CAP_ID 33 +#define DOT11_MNG_TPC_REQUEST_ID 34 +#define DOT11_MNG_TPC_REPORT_ID 35 +#define DOT11_MNG_SUPP_CHANNELS_ID 36 +#define DOT11_MNG_CHANNEL_SWITCH_ID 37 +#define DOT11_MNG_MEASURE_REQUEST_ID 38 +#define DOT11_MNG_MEASURE_REPORT_ID 39 +#define DOT11_MNG_QUIET_ID 40 +#define DOT11_MNG_IBSS_DFS_ID 41 +#define DOT11_MNG_ERP_ID 42 +#define DOT11_MNG_TS_DELAY_ID 43 +#define DOT11_MNG_HT_CAP 45 +#define DOT11_MNG_QOS_CAP_ID 46 +#define DOT11_MNG_NONERP_ID 47 +#define DOT11_MNG_RSN_ID 48 +#define DOT11_MNG_EXT_RATES_ID 50 +#define DOT11_MNG_REGCLASS_ID 59 +#define DOT11_MNG_EXT_CSA_ID 60 +#define DOT11_MNG_HT_ADD 61 +#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 +#define DOT11_MNG_WAPI_ID 68 +#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 +#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 +#define DOT11_MNG_HT_OBSS_ID 74 +#define DOT11_MNG_EXT_CAP 127 +#define DOT11_MNG_WPA_ID 221 +#define DOT11_MNG_PROPR_ID 221 + + +#define DOT11_RATE_BASIC 0x80 +#define DOT11_RATE_MASK 0x7F + + +#define DOT11_MNG_ERP_LEN 1 +#define DOT11_MNG_NONERP_PRESENT 0x01 +#define DOT11_MNG_USE_PROTECTION 0x02 +#define DOT11_MNG_BARKER_PREAMBLE 0x04 + +#define DOT11_MGN_TS_DELAY_LEN 4 +#define TS_DELAY_FIELD_SIZE 4 + + +#define DOT11_CAP_ESS 0x0001 +#define DOT11_CAP_IBSS 0x0002 +#define DOT11_CAP_POLLABLE 0x0004 +#define DOT11_CAP_POLL_RQ 0x0008 +#define DOT11_CAP_PRIVACY 0x0010 +#define DOT11_CAP_SHORT 0x0020 +#define DOT11_CAP_PBCC 0x0040 +#define DOT11_CAP_AGILITY 0x0080 +#define DOT11_CAP_SPECTRUM 0x0100 +#define DOT11_CAP_SHORTSLOT 0x0400 +#define DOT11_CAP_CCK_OFDM 0x2000 + + 
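[Editorial note] Everything after the fixed DOT11_BCN_PRB_LEN bytes of a beacon or probe response body is a list of tagged elements laid out with the TLV_TAG_OFF/TLV_LEN_OFF/TLV_HDR_LEN offsets above, and the DOT11_MNG_*_ID values name the tags. A small hedged sketch of the corresponding TLV walk (the helper name is illustrative):

    #include <typedefs.h>
    #include <proto/802.11.h>

    /* Find an information element by id.  "ies"/"ies_len" cover only the
     * tagged area, i.e. the body after DOT11_BCN_PRB_LEN. */
    static const uint8 *example_find_ie(const uint8 *ies, uint ies_len, uint8 id)
    {
            while (ies_len >= TLV_HDR_LEN) {
                    uint8 ie_len = ies[TLV_LEN_OFF];

                    if ((uint)(TLV_HDR_LEN + ie_len) > ies_len)
                            break;                  /* truncated/malformed element */
                    if (ies[TLV_TAG_OFF] == id)     /* e.g. DOT11_MNG_SSID_ID */
                            return ies;

                    ies     += TLV_HDR_LEN + ie_len;
                    ies_len -= TLV_HDR_LEN + ie_len;
            }
            return NULL;
    }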
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 + + +#define DOT11_ACTION_HDR_LEN 2 +#define DOT11_ACTION_CAT_ERR_MASK 0x80 +#define DOT11_ACTION_CAT_MASK 0x7F +#define DOT11_ACTION_CAT_SPECT_MNG 0 +#define DOT11_ACTION_CAT_BLOCKACK 3 +#define DOT11_ACTION_CAT_PUBLIC 4 +#define DOT11_ACTION_CAT_HT 7 +#define DOT11_ACTION_CAT_VS 127 +#define DOT11_ACTION_NOTIFICATION 0x11 + +#define DOT11_ACTION_ID_M_REQ 0 +#define DOT11_ACTION_ID_M_REP 1 +#define DOT11_ACTION_ID_TPC_REQ 2 +#define DOT11_ACTION_ID_TPC_REP 3 +#define DOT11_ACTION_ID_CHANNEL_SWITCH 4 +#define DOT11_ACTION_ID_EXT_CSA 5 + + +#define DOT11_ACTION_ID_HT_CH_WIDTH 0 +#define DOT11_ACTION_ID_HT_MIMO_PS 1 + + +#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 +#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 + + +#define DOT11_BA_ACTION_ADDBA_REQ 0 +#define DOT11_BA_ACTION_ADDBA_RESP 1 +#define DOT11_BA_ACTION_DELBA 2 + + +#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 +#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 +#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 +#define DOT11_ADDBA_PARAM_TID_MASK 0x003c +#define DOT11_ADDBA_PARAM_TID_SHIFT 2 +#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 +#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 + +#define DOT11_ADDBA_POLICY_DELAYED 0 +#define DOT11_ADDBA_POLICY_IMMEDIATE 1 + +BWL_PRE_PACKED_STRUCT struct dot11_addba_req { + uint8 category; + uint8 action; + uint8 token; + uint16 addba_param_set; + uint16 timeout; + uint16 start_seqnum; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_req dot11_addba_req_t; +#define DOT11_ADDBA_REQ_LEN 9 + +BWL_PRE_PACKED_STRUCT struct dot11_addba_resp { + uint8 category; + uint8 action; + uint8 token; + uint16 status; + uint16 addba_param_set; + uint16 timeout; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_resp dot11_addba_resp_t; +#define DOT11_ADDBA_RESP_LEN 9 + + +#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 +#define DOT11_DELBA_PARAM_INIT_SHIFT 11 +#define DOT11_DELBA_PARAM_TID_MASK 0xf000 +#define DOT11_DELBA_PARAM_TID_SHIFT 12 + +BWL_PRE_PACKED_STRUCT struct dot11_delba { + uint8 category; + uint8 action; + uint16 delba_param_set; + uint16 reason; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_delba dot11_delba_t; +#define DOT11_DELBA_LEN 6 + + +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 +#define DOT11_BSSTYPE_INDEPENDENT 1 +#define DOT11_BSSTYPE_ANY 2 +#define DOT11_SCANTYPE_ACTIVE 0 +#define DOT11_SCANTYPE_PASSIVE 1 + + +#define PREN_PREAMBLE 24 +#define PREN_MM_EXT 8 +#define PREN_PREAMBLE_EXT 4 + + +#define NPHY_RIFS_TIME 2 + + +#define APHY_SLOT_TIME 9 +#define APHY_SIFS_TIME 16 +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) +#define APHY_PREAMBLE_TIME 16 +#define APHY_SIGNAL_TIME 4 +#define APHY_SYMBOL_TIME 4 +#define APHY_SERVICE_NBITS 16 +#define APHY_TAIL_NBITS 6 +#define APHY_CWMIN 15 + + +#define BPHY_SLOT_TIME 20 +#define BPHY_SIFS_TIME 10 +#define BPHY_DIFS_TIME 50 +#define BPHY_PLCP_TIME 192 +#define BPHY_PLCP_SHORT_TIME 96 +#define BPHY_CWMIN 31 + + +#define DOT11_OFDM_SIGNAL_EXTENSION 6 + +#define PHY_CWMAX 1023 + +#define DOT11_MAXNUMFRAGS 16 + + +typedef struct d11cnt { + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 txretry; + uint32 txretrie; + uint32 rxdup; + uint32 txrts; + uint32 txnocts; + uint32 txnoack; + uint32 rxfrag; + uint32 rxmulti; + uint32 rxcrc; + uint32 txfrmsnt; + uint32 rxundec; +} d11cnt_t; + + +#define BRCM_PROP_OUI "\x00\x90\x4C" + + + + +BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + uint16 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_prop_ie_s 
brcm_prop_ie_t; + +#define BRCM_PROP_IE_LEN 6 + +#define DPT_IE_TYPE 2 + + +#define BRCM_OUI "\x00\x10\x18" + + +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 ver; + uint8 assoc; + uint8 flags; + uint8 flags1; + uint16 amsdu_mtu_pref; +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 11 +#define BRCM_IE_VER 2 +#define BRCM_IE_LEGACY_AES_VER 1 + + +#ifdef WLAFTERBURNER +#define BRF_ABCAP 0x1 +#define BRF_ABRQRD 0x2 +#define BRF_ABCOUNTER_MASK 0xf0 +#define BRF_ABCOUNTER_SHIFT 4 +#endif +#define BRF_LZWDS 0x4 +#define BRF_BLOCKACK 0x8 + + +#define BRF1_AMSDU 0x1 +#define BRF1_WMEPS 0x4 +#define BRF1_PSOFIX 0x8 + +#ifdef WLAFTERBURNER +#define AB_WDS_TIMEOUT_MAX 15 +#define AB_WDS_TIMEOUT_MIN 1 +#endif + +#define AB_GUARDCOUNT 10 + +#define MCSSET_LEN 16 +#define MAX_MCS_NUM (128) + +BWL_PRE_PACKED_STRUCT struct ht_cap_ie { + uint16 cap; + uint8 params; + uint8 supp_mcs[MCSSET_LEN]; + uint16 ext_htcap; + uint32 txbf_cap; + uint8 as_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_cap_ie ht_cap_ie_t; + + + +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; +#define HT_PROP_IE_OVERHEAD 4 +#define HT_CAP_IE_LEN 26 +#define HT_CAP_IE_TYPE 51 + +#define HT_CAP_LDPC_CODING 0x0001 +#define HT_CAP_40MHZ 0x0002 +#define HT_CAP_MIMO_PS_MASK 0x000C +#define HT_CAP_MIMO_PS_SHIFT 0x0002 +#define HT_CAP_MIMO_PS_OFF 0x0003 +#define HT_CAP_MIMO_PS_RTS 0x0001 +#define HT_CAP_MIMO_PS_ON 0x0000 +#define HT_CAP_GF 0x0010 +#define HT_CAP_SHORT_GI_20 0x0020 +#define HT_CAP_SHORT_GI_40 0x0040 +#define HT_CAP_TX_STBC 0x0080 +#define HT_CAP_RX_STBC_MASK 0x0300 +#define HT_CAP_RX_STBC_SHIFT 8 +#define HT_CAP_DELAYED_BA 0x0400 +#define HT_CAP_MAX_AMSDU 0x0800 +#define HT_CAP_DSSS_CCK 0x1000 +#define HT_CAP_PSMP 0x2000 +#define HT_CAP_40MHZ_INTOLERANT 0x4000 +#define HT_CAP_LSIG_TXOP 0x8000 + +#define HT_CAP_RX_STBC_NO 0x0 +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 + +#define HT_MAX_AMSDU 7935 +#define HT_MIN_AMSDU 3835 + +#define HT_PARAMS_RX_FACTOR_MASK 0x03 +#define HT_PARAMS_DENSITY_MASK 0x1C +#define HT_PARAMS_DENSITY_SHIFT 2 + + +#define AMPDU_MAX_MPDU_DENSITY 7 +#define AMPDU_RX_FACTOR_64K 3 +#define AMPDU_RX_FACTOR_BASE 8*1024 +#define AMPDU_DELIMITER_LEN 4 + +#define HT_CAP_EXT_PCO 0x0001 +#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 +#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 +#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 +#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 +#define HT_CAP_EXT_HTC 0x0400 +#define HT_CAP_EXT_RD_RESP 0x0800 + +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; + uint8 byte1; + uint16 opmode; + uint16 misc_bits; + uint8 basic_mcs[MCSSET_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + + + +BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + ht_add_ie_t add_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_add_ie ht_prop_add_ie_t; + +#define HT_ADD_IE_LEN 22 +#define HT_ADD_IE_TYPE 52 + + +#define HT_BW_ANY 0x04 +#define HT_RIFS_PERMITTED 0x08 + + +#define HT_OPMODE_MASK 0x0003 +#define HT_OPMODE_SHIFT 0 +#define HT_OPMODE_PURE 0x0000 +#define HT_OPMODE_OPTIONAL 0x0001 +#define HT_OPMODE_HT20IN40 0x0002 +#define HT_OPMODE_MIXED 0x0003 +#define HT_OPMODE_NONGF 0x0004 +#define DOT11N_TXBURST 0x0008 +#define DOT11N_OBSS_NONHT 
0x0010 + + +#define HT_BASIC_STBC_MCS 0x007f +#define HT_DUAL_STBC_PROT 0x0080 +#define HT_SECOND_BCN 0x0100 +#define HT_LSIG_TXOP 0x0200 +#define HT_PCO_ACTIVE 0x0400 +#define HT_PCO_PHASE 0x0800 +#define HT_DUALCTS_PROTECTION 0x0080 + + +#define DOT11N_2G_TXBURST_LIMIT 6160 +#define DOT11N_5G_TXBURST_LIMIT 3080 + + +#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + >> HT_OPMODE_SHIFT) +#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_MIXED) +#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_HT20IN40) +#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_OPTIONAL) +#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \ + HT_MIXEDMODE_PRESENT((add_ie))) +#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \ + == HT_OPMODE_NONGF) +#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \ + == DOT11N_TXBURST) +#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ + == DOT11N_OBSS_NONHT) + +BWL_PRE_PACKED_STRUCT struct obss_params { + uint16 passive_dwell; + uint16 active_dwell; + uint16 bss_widthscan_interval; + uint16 passive_total; + uint16 active_total; + uint16 chanwidth_transition_dly; + uint16 activity_threshold; +} BWL_POST_PACKED_STRUCT; +typedef struct obss_params obss_params_t; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_ie { + uint8 id; + uint8 len; + obss_params_t obss_params; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_ie dot11_obss_ie_t; +#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) + + +BWL_PRE_PACKED_STRUCT struct vndr_ie { + uchar id; + uchar len; + uchar oui [3]; + uchar data [1]; +} BWL_POST_PACKED_STRUCT; +typedef struct vndr_ie vndr_ie_t; + +#define VNDR_IE_HDR_LEN 2 +#define VNDR_IE_MIN_LEN 3 +#define VNDR_IE_MAX_LEN 256 + + +#define WPA_VERSION 1 +#define WPA_OUI "\x00\x50\xF2" + +#define WPA2_VERSION 1 +#define WPA2_VERSION_LEN 2 +#define WPA2_OUI "\x00\x0F\xAC" + +#define WPA_OUI_LEN 3 + + +#define RSN_AKM_NONE 0 +#define RSN_AKM_UNSPECIFIED 1 +#define RSN_AKM_PSK 2 + + +#define DOT11_MAX_DEFAULT_KEYS 4 +#define DOT11_MAX_KEY_SIZE 32 +#define DOT11_MAX_IV_SIZE 16 +#define DOT11_EXT_IV_FLAG (1<<5) +#define DOT11_WPA_KEY_RSC_LEN 8 + +#define WEP1_KEY_SIZE 5 +#define WEP1_KEY_HEX_SIZE 10 +#define WEP128_KEY_SIZE 13 +#define WEP128_KEY_HEX_SIZE 26 +#define TKIP_MIC_SIZE 8 +#define TKIP_EOM_SIZE 7 +#define TKIP_EOM_FLAG 0x5a +#define TKIP_KEY_SIZE 32 +#define TKIP_MIC_AUTH_TX 16 +#define TKIP_MIC_AUTH_RX 24 +#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX +#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX +#define AES_KEY_SIZE 16 +#define AES_MIC_SIZE 8 + +#define SMS4_KEY_LEN 16 +#define SMS4_WPI_CBC_MAC_LEN 16 + + +#include <packed_section_end.h> + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11e.h b/drivers/net/wireless/bcm4329/include/proto/802.11e.h new file mode 100644 index 000000000000..1dd6f45b1ed8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/802.11e.h @@ -0,0 +1,131 @@ +/* + * 802.11e protocol header file + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: 802.11e.h,v 1.5.56.1 2008/11/20 00:51:18 Exp $ + */ + +#ifndef _802_11e_H_ +#define _802_11e_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + + +/* WME Traffic Specification (TSPEC) element */ +#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */ +#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */ + +#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */ +#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */ +#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */ +#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */ + +BWL_PRE_PACKED_STRUCT struct tsinfo { + uint8 octets[3]; +} BWL_POST_PACKED_STRUCT; + +typedef struct tsinfo tsinfo_t; + +/* 802.11e TSPEC IE */ +typedef BWL_PRE_PACKED_STRUCT struct tspec { + uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */ + uint8 type; /* WME_TYPE */ + uint8 subtype; /* WME_SUBTYPE_TSPEC */ + uint8 version; /* WME_VERSION */ + tsinfo_t tsinfo; /* TS Info bit field */ + uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /* Minimum Service Interval (us) */ + uint32 max_srv_interval; /* Maximum Service Interval (us) */ + uint32 inactivity_interval; /* Inactivity Interval (us) */ + uint32 suspension_interval; /* Suspension Interval (us) */ + uint32 srv_start_time; /* Service Start Time (us) */ + uint32 min_data_rate; /* Minimum Data Rate (bps) */ + uint32 mean_data_rate; /* Mean Data Rate (bps) */ + uint32 peak_data_rate; /* Peak Data Rate (bps) */ + uint32 max_burst_size; /* Maximum Burst Size (bytes) */ + uint32 delay_bound; /* Delay Bound (us) */ + uint32 min_phy_rate; /* Minimum PHY Rate (bps) */ + uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */ + uint16 medium_time; /* Medium Time (32 us/s periods) */ +} BWL_POST_PACKED_STRUCT tspec_t; + +#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */ + +/* ts_info */ +/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */ +#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */ +#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */ +#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */ +#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */ +#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */ +#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */ +#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */ +#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. 
PSB mask */ +#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */ +#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */ +#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */ +#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */ +/* TS info. user priority mask */ +#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT) + +/* Macro to get/set bit(s) field in TSINFO */ +#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT) +#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \ + TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT) +#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT) +#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \ + TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT) + +#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \ + ((id) << TS_INFO_TID_SHIFT)) +#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \ + ((prio) << TS_INFO_USER_PRIO_SHIFT)) + +/* 802.11e QBSS Load IE */ +#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */ +#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */ + +#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */ + +/* 802.11e ADDTS status code */ +#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */ +#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */ +#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */ +#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */ + +/* 802.11e DELTS status code */ +#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */ +#define DOT11E_STATUS_END_TS 37 /* END TS */ +#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */ +#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */ + + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _802_11e_CAC_H_ */ diff --git a/drivers/net/wireless/bcm4329/include/proto/802.1d.h b/drivers/net/wireless/bcm4329/include/proto/802.1d.h new file mode 100644 index 000000000000..45c728bc2976 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/802.1d.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
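[Editorial note] Note that the WLC_CAC_SET_* macros above only compute the updated octet of the TS Info field; they do not store it, so callers assign the result themselves. A minimal hedged example for the TID bits (the function name is illustrative):

    #include <typedefs.h>
    #include <proto/802.11e.h>

    /* Write a 4-bit TID into a TS Info field and read it back. */
    static uint8 example_tsinfo_set_tid(tsinfo_t *ti, uint8 tid)
    {
            ti->octets[0] = (uint8)WLC_CAC_SET_TID(*ti, tid);
            return (uint8)WLC_CAC_GET_TID(*ti);     /* equals tid & 0xf */
    }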
+ * + * Fundamental types and constants relating to 802.1D + * + * $Id: 802.1d.h,v 9.3 2007/04/10 21:33:06 Exp $ + */ + + +#ifndef _802_1_D_ +#define _802_1_D_ + + +#define PRIO_8021D_NONE 2 +#define PRIO_8021D_BK 1 +#define PRIO_8021D_BE 0 +#define PRIO_8021D_EE 3 +#define PRIO_8021D_CL 4 +#define PRIO_8021D_VI 5 +#define PRIO_8021D_VO 6 +#define PRIO_8021D_NC 7 +#define MAXPRIO 7 +#define NUMPRIO (MAXPRIO + 1) + +#define ALLPRIO -1 + + +#define PRIO2PREC(prio) \ + (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio)) + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmeth.h b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h new file mode 100644 index 000000000000..fdb5a2a5648f --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h @@ -0,0 +1,83 @@ +/* + * Broadcom Ethernettype protocol definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
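[Editorial note] PRIO2PREC above maps an 802.1D user priority to what is presumably the precedence index used for queue ordering: it XORs BE (0) and NONE (2) with 2 so those two swap, and passes every other priority straight through. Expanded for all eight priorities, purely as an illustration of the macro:

    #include <proto/802.1d.h>

    static const int example_prec_by_prio[NUMPRIO] = {
            PRIO2PREC(PRIO_8021D_BE),       /* 0 -> 2 */
            PRIO2PREC(PRIO_8021D_BK),       /* 1 -> 1 */
            PRIO2PREC(PRIO_8021D_NONE),     /* 2 -> 0 */
            PRIO2PREC(PRIO_8021D_EE),       /* 3 -> 3 */
            PRIO2PREC(PRIO_8021D_CL),       /* 4 -> 4 */
            PRIO2PREC(PRIO_8021D_VI),       /* 5 -> 5 */
            PRIO2PREC(PRIO_8021D_VO),       /* 6 -> 6 */
            PRIO2PREC(PRIO_8021D_NC),       /* 7 -> 7 */
    };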
+ * + * $Id: bcmeth.h,v 9.9.46.1 2008/11/20 00:51:20 Exp $ + */ + + + + +#ifndef _BCMETH_H_ +#define _BCMETH_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + + +#include <packed_section_start.h> + + + + + + + +#define BCMILCP_SUBTYPE_RATE 1 +#define BCMILCP_SUBTYPE_LINK 2 +#define BCMILCP_SUBTYPE_CSA 3 +#define BCMILCP_SUBTYPE_LARQ 4 +#define BCMILCP_SUBTYPE_VENDOR 5 +#define BCMILCP_SUBTYPE_FLH 17 + +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 +#define BCMILCP_SUBTYPE_CERT 32770 +#define BCMILCP_SUBTYPE_SES 32771 + + +#define BCMILCP_BCM_SUBTYPE_RESERVED 0 +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BCMILCP_BCM_SUBTYPE_SES 2 + + +#define BCMILCP_BCM_SUBTYPE_DPT 4 + +#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 +#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 + + +typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr +{ + uint16 subtype; + uint16 length; + uint8 version; + uint8 oui[3]; + + uint16 usr_subtype; +} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; + + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmevent.h b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h new file mode 100644 index 000000000000..46c04d379227 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h @@ -0,0 +1,211 @@ +/* + * Broadcom Event protocol definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * Dependencies: proto/bcmeth.h + * + * $Id: bcmevent.h,v 9.34.4.1.20.16 2009/09/25 23:52:38 Exp $ + * + */ + + + + +#ifndef _BCMEVENT_H_ +#define _BCMEVENT_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + + +#include <packed_section_start.h> + +#define BCM_EVENT_MSG_VERSION 1 +#define BCM_MSG_IFNAME_MAX 16 + + +#define WLC_EVENT_MSG_LINK 0x01 +#define WLC_EVENT_MSG_FLUSHTXQ 0x02 +#define WLC_EVENT_MSG_GROUP 0x04 + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; + uint32 event_type; + uint32 status; + uint32 reason; + uint32 auth_type; + uint32 datalen; + struct ether_addr addr; + char ifname[BCM_MSG_IFNAME_MAX]; +} BWL_POST_PACKED_STRUCT wl_event_msg_t; + + +typedef BWL_PRE_PACKED_STRUCT struct bcm_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + wl_event_msg_t event; + +} BWL_POST_PACKED_STRUCT bcm_event_t; + +#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) + + +#define WLC_E_SET_SSID 0 +#define WLC_E_JOIN 1 +#define WLC_E_START 2 +#define WLC_E_AUTH 3 +#define WLC_E_AUTH_IND 4 +#define WLC_E_DEAUTH 5 +#define WLC_E_DEAUTH_IND 6 +#define WLC_E_ASSOC 7 +#define WLC_E_ASSOC_IND 8 +#define WLC_E_REASSOC 9 +#define WLC_E_REASSOC_IND 10 +#define WLC_E_DISASSOC 11 +#define WLC_E_DISASSOC_IND 12 +#define WLC_E_QUIET_START 13 +#define WLC_E_QUIET_END 14 +#define WLC_E_BEACON_RX 15 +#define WLC_E_LINK 16 +#define WLC_E_MIC_ERROR 17 +#define WLC_E_NDIS_LINK 18 +#define WLC_E_ROAM 19 +#define WLC_E_TXFAIL 20 +#define WLC_E_PMKID_CACHE 21 +#define WLC_E_RETROGRADE_TSF 22 +#define WLC_E_PRUNE 23 +#define WLC_E_AUTOAUTH 24 +#define WLC_E_EAPOL_MSG 25 +#define WLC_E_SCAN_COMPLETE 26 +#define WLC_E_ADDTS_IND 27 +#define WLC_E_DELTS_IND 28 +#define WLC_E_BCNSENT_IND 29 +#define WLC_E_BCNRX_MSG 30 +#define WLC_E_BCNLOST_MSG 31 +#define WLC_E_ROAM_PREP 32 +#define WLC_E_PFN_NET_FOUND 33 +#define WLC_E_PFN_NET_LOST 34 +#define WLC_E_RESET_COMPLETE 35 +#define WLC_E_JOIN_START 36 +#define WLC_E_ROAM_START 37 +#define WLC_E_ASSOC_START 38 +#define WLC_E_IBSS_ASSOC 39 +#define WLC_E_RADIO 40 +#define WLC_E_PSM_WATCHDOG 41 +#define WLC_E_PROBREQ_MSG 44 +#define WLC_E_SCAN_CONFIRM_IND 45 +#define WLC_E_PSK_SUP 46 +#define WLC_E_COUNTRY_CODE_CHANGED 47 +#define WLC_E_EXCEEDED_MEDIUM_TIME 48 +#define WLC_E_ICV_ERROR 49 +#define WLC_E_UNICAST_DECODE_ERROR 50 +#define WLC_E_MULTICAST_DECODE_ERROR 51 +#define WLC_E_TRACE 52 +#define WLC_E_IF 54 +#define WLC_E_RSSI 56 +#define WLC_E_PFN_SCAN_COMPLETE 57 +#define WLC_E_ACTION_FRAME 58 +#define WLC_E_ACTION_FRAME_COMPLETE 59 + +#define WLC_E_ESCAN_RESULT 69 +#define WLC_E_WAKE_EVENT 70 +#define WLC_E_LAST 71 + + + + +#define WLC_E_STATUS_SUCCESS 0 +#define WLC_E_STATUS_FAIL 1 +#define WLC_E_STATUS_TIMEOUT 2 +#define WLC_E_STATUS_NO_NETWORKS 3 +#define WLC_E_STATUS_ABORT 4 +#define WLC_E_STATUS_NO_ACK 5 +#define WLC_E_STATUS_UNSOLICITED 6 +#define WLC_E_STATUS_ATTEMPT 7 +#define WLC_E_STATUS_PARTIAL 8 +#define WLC_E_STATUS_NEWSCAN 9 +#define WLC_E_STATUS_NEWASSOC 10 +#define WLC_E_STATUS_11HQUIET 11 +#define WLC_E_STATUS_SUPPRESS 12 +#define WLC_E_STATUS_NOCHANS 13 +#define WLC_E_STATUS_CCXFASTRM 14 +#define WLC_E_STATUS_CS_ABORT 15 + + +#define WLC_E_REASON_INITIAL_ASSOC 0 +#define WLC_E_REASON_LOW_RSSI 1 +#define WLC_E_REASON_DEAUTH 2 +#define WLC_E_REASON_DISASSOC 3 +#define WLC_E_REASON_BCNS_LOST 4 +#define WLC_E_REASON_FAST_ROAM_FAILED 5 +#define WLC_E_REASON_DIRECTED_ROAM 6 +#define WLC_E_REASON_TSPEC_REJECTED 7 +#define WLC_E_REASON_BETTER_AP 8 + + +#define 
WLC_E_PRUNE_ENCR_MISMATCH 1 +#define WLC_E_PRUNE_BCAST_BSSID 2 +#define WLC_E_PRUNE_MAC_DENY 3 +#define WLC_E_PRUNE_MAC_NA 4 +#define WLC_E_PRUNE_REG_PASSV 5 +#define WLC_E_PRUNE_SPCT_MGMT 6 +#define WLC_E_PRUNE_RADAR 7 +#define WLC_E_RSN_MISMATCH 8 +#define WLC_E_PRUNE_NO_COMMON_RATES 9 +#define WLC_E_PRUNE_BASIC_RATES 10 +#define WLC_E_PRUNE_CIPHER_NA 12 +#define WLC_E_PRUNE_KNOWN_STA 13 +#define WLC_E_PRUNE_WDS_PEER 15 +#define WLC_E_PRUNE_QBSS_LOAD 16 +#define WLC_E_PRUNE_HOME_AP 17 + + +#define WLC_E_SUP_OTHER 0 +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 +#define WLC_E_SUP_BAD_UCAST_WEP128 2 +#define WLC_E_SUP_BAD_UCAST_WEP40 3 +#define WLC_E_SUP_UNSUP_KEY_LEN 4 +#define WLC_E_SUP_PW_KEY_CIPHER 5 +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 +#define WLC_E_SUP_NO_INSTALL_FLAG 8 +#define WLC_E_SUP_MSG3_NO_GTK 9 +#define WLC_E_SUP_GRP_KEY_CIPHER 10 +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 +#define WLC_E_SUP_SEND_FAIL 13 +#define WLC_E_SUP_DEAUTH 14 +#define WLC_E_SUP_WPA_PSK_TMO 15 + + +#define WLC_E_IF_ADD 1 +#define WLC_E_IF_DEL 2 + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmip.h b/drivers/net/wireless/bcm4329/include/proto/bcmip.h new file mode 100644 index 000000000000..9d2fd6fba484 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/bcmip.h @@ -0,0 +1,157 @@ +/* + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * Fundamental constants relating to IP Protocol + * + * $Id: bcmip.h,v 9.16.186.4 2009/01/27 04:25:25 Exp $ + */ + + +#ifndef _bcmip_h_ +#define _bcmip_h_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + + +#include <packed_section_start.h> + + + +#define IP_VER_OFFSET 0x0 +#define IP_VER_MASK 0xf0 +#define IP_VER_SHIFT 4 +#define IP_VER_4 4 +#define IP_VER_6 6 + +#define IP_VER(ip_body) \ + ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) + +#define IP_PROT_ICMP 0x1 +#define IP_PROT_TCP 0x6 +#define IP_PROT_UDP 0x11 + + +#define IPV4_VER_HL_OFFSET 0 +#define IPV4_TOS_OFFSET 1 +#define IPV4_PKTLEN_OFFSET 2 +#define IPV4_PKTFLAG_OFFSET 6 +#define IPV4_PROT_OFFSET 9 +#define IPV4_CHKSUM_OFFSET 10 +#define IPV4_SRC_IP_OFFSET 12 +#define IPV4_DEST_IP_OFFSET 16 +#define IPV4_OPTIONS_OFFSET 20 + + +#define IPV4_VER_MASK 0xf0 +#define IPV4_VER_SHIFT 4 + +#define IPV4_HLEN_MASK 0x0f +#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) + +#define IPV4_ADDR_LEN 4 + +#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ + ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) + +#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ + ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) + +#define IPV4_TOS_DSCP_MASK 0xfc +#define IPV4_TOS_DSCP_SHIFT 2 + +#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) + +#define IPV4_TOS_PREC_MASK 0xe0 +#define IPV4_TOS_PREC_SHIFT 5 + +#define IPV4_TOS_LOWDELAY 0x10 +#define IPV4_TOS_THROUGHPUT 0x8 +#define IPV4_TOS_RELIABILITY 0x4 + +#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) + +#define IPV4_FRAG_RESV 0x8000 +#define IPV4_FRAG_DONT 0x4000 +#define IPV4_FRAG_MORE 0x2000 +#define IPV4_FRAG_OFFSET_MASK 0x1fff + +#define IPV4_ADDR_STR_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct ipv4_addr { + uint8 addr[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv4_hdr { + uint8 version_ihl; + uint8 tos; + uint16 tot_len; + uint16 id; + uint16 frag; + uint8 ttl; + uint8 prot; + uint16 hdr_chksum; + uint8 src_ip[IPV4_ADDR_LEN]; + uint8 dst_ip[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + + +#define IPV6_PAYLOAD_LEN_OFFSET 4 +#define IPV6_NEXT_HDR_OFFSET 6 +#define IPV6_HOP_LIMIT_OFFSET 7 +#define IPV6_SRC_IP_OFFSET 8 +#define IPV6_DEST_IP_OFFSET 24 + + +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 + + +#ifndef IP_TOS +#define IP_TOS(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? 
IPV6_TRAFFIC_CLASS(ip_body) : 0) +#endif + + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/eapol.h b/drivers/net/wireless/bcm4329/include/proto/eapol.h new file mode 100644 index 000000000000..95e76ff18c6b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/eapol.h @@ -0,0 +1,172 @@ +/* + * 802.1x EAPOL definitions + * + * See + * IEEE Std 802.1X-2001 + * IEEE 802.1X RADIUS Usage Guidelines + * + * Copyright (C) 2002 Broadcom Corporation + * + * $Id: eapol.h,v 9.18.260.1.2.1.6.6 2009/04/08 05:00:08 Exp $ + */ + +#ifndef _eapol_h_ +#define _eapol_h_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + +#define AKW_BLOCK_LEN 8 /* The only def we need here */ + +/* EAPOL for 802.3/Ethernet */ +typedef struct { + struct ether_header eth; /* 802.3/Ethernet header */ + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ + unsigned char body[1]; /* Body (optional) */ +} eapol_header_t; + +#define EAPOL_HEADER_LEN 18 + +/* EAPOL version */ +#define WPA2_EAPOL_VERSION 2 +#define WPA_EAPOL_VERSION 1 +#define LEAP_EAPOL_VERSION 1 +#define SES_EAPOL_VERSION 1 + +/* EAPOL types */ +#define EAP_PACKET 0 +#define EAPOL_START 1 +#define EAPOL_LOGOFF 2 +#define EAPOL_KEY 3 +#define EAPOL_ASF 4 + +/* EAPOL-Key types */ +#define EAPOL_RC4_KEY 1 +#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */ +#define EAPOL_WPA_KEY 254 /* WPA */ + +/* RC4 EAPOL-Key header field sizes */ +#define EAPOL_KEY_REPLAY_LEN 8 +#define EAPOL_KEY_IV_LEN 16 +#define EAPOL_KEY_SIG_LEN 16 + +/* RC4 EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short length; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */ + unsigned char index; /* Key Flags & Index */ + unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */ + unsigned char key[1]; /* Key (optional) */ +} BWL_POST_PACKED_STRUCT eapol_key_header_t; + +#define EAPOL_KEY_HEADER_LEN 44 + +/* RC4 EAPOL-Key flags */ +#define EAPOL_KEY_FLAGS_MASK 0x80 +#define EAPOL_KEY_BROADCAST 0 +#define EAPOL_KEY_UNICAST 0x80 + +/* RC4 EAPOL-Key index */ +#define EAPOL_KEY_INDEX_MASK 0x7f + +/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */ +#define EAPOL_WPA_KEY_REPLAY_LEN 8 +#define EAPOL_WPA_KEY_NONCE_LEN 32 +#define EAPOL_WPA_KEY_IV_LEN 16 +#define EAPOL_WPA_KEY_ID_LEN 8 +#define EAPOL_WPA_KEY_RSC_LEN 8 +#define EAPOL_WPA_KEY_MIC_LEN 16 +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN) +#define EAPOL_WPA_MAX_KEY_SIZE 32 + +/* WPA EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */ + unsigned short data_len; /* Key Data Length */ + unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */ +} BWL_POST_PACKED_STRUCT 
eapol_wpa_key_header_t; + +#define EAPOL_WPA_KEY_LEN 95 + +/* WPA/802.11i/WPA2 KEY KEY_INFO bits */ +#define WPA_KEY_DESC_V1 0x01 +#define WPA_KEY_DESC_V2 0x02 +#define WPA_KEY_PAIRWISE 0x08 +#define WPA_KEY_INSTALL 0x40 +#define WPA_KEY_ACK 0x80 +#define WPA_KEY_MIC 0x100 +#define WPA_KEY_SECURE 0x200 +#define WPA_KEY_ERROR 0x400 +#define WPA_KEY_REQ 0x800 + +/* WPA-only KEY KEY_INFO bits */ +#define WPA_KEY_INDEX_0 0x00 +#define WPA_KEY_INDEX_1 0x10 +#define WPA_KEY_INDEX_2 0x20 +#define WPA_KEY_INDEX_3 0x30 +#define WPA_KEY_INDEX_MASK 0x30 +#define WPA_KEY_INDEX_SHIFT 0x04 + +/* 802.11i/WPA2-only KEY KEY_INFO bits */ +#define WPA_KEY_ENCRYPTED_DATA 0x1000 + +/* Key Data encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 type; + uint8 length; + uint8 oui[3]; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t; + +#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6 + +#define WPA2_KEY_DATA_SUBTYPE_GTK 1 +#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2 +#define WPA2_KEY_DATA_SUBTYPE_MAC 3 +#define WPA2_KEY_DATA_SUBTYPE_PMKID 4 + +/* GTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 flags; + uint8 reserved; + uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t; + +#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2 + +#define WPA2_GTK_INDEX_MASK 0x03 +#define WPA2_GTK_INDEX_SHIFT 0x00 + +#define WPA2_GTK_TRANSMIT 0x04 + +/* STAKey encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 reserved[2]; + uint8 mac[ETHER_ADDR_LEN]; + uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t; + +#define WPA2_KEY_DATA_PAD 0xdd + + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* _eapol_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/proto/ethernet.h b/drivers/net/wireless/bcm4329/include/proto/ethernet.h new file mode 100644 index 000000000000..9ad2ea0c70fd --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/ethernet.h @@ -0,0 +1,148 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: ethernet.h,v 9.45.56.5 2010/02/22 22:04:36 Exp $ + */ + + +#ifndef _NET_ETHERNET_H_ +#define _NET_ETHERNET_H_ + +#ifndef _TYPEDEFS_H_ +#include "typedefs.h" +#endif + + +#include <packed_section_start.h> + + + +#define ETHER_ADDR_LEN 6 + + +#define ETHER_TYPE_LEN 2 + + +#define ETHER_CRC_LEN 4 + + +#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) + + +#define ETHER_MIN_LEN 64 + + +#define ETHER_MIN_DATA 46 + + +#define ETHER_MAX_LEN 1518 + + +#define ETHER_MAX_DATA 1500 + + +#define ETHER_TYPE_MIN 0x0600 +#define ETHER_TYPE_IP 0x0800 +#define ETHER_TYPE_ARP 0x0806 +#define ETHER_TYPE_8021Q 0x8100 +#define ETHER_TYPE_BRCM 0x886c +#define ETHER_TYPE_802_1X 0x888e +#define ETHER_TYPE_WAI 0x88b4 +#ifdef BCMWPA2 +#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 +#endif + + +#define ETHER_BRCM_SUBTYPE_LEN 4 +#define ETHER_BRCM_CRAM 1 + + +#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) +#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) +#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) + + +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + + +#ifndef __INCif_etherh + +BWL_PRE_PACKED_STRUCT struct ether_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + + +BWL_PRE_PACKED_STRUCT struct ether_addr { + uint8 octet[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +#endif + + +#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) +#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) +#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xd)) +#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) + + +#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) + + +#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) + + + +#define ether_cmp(a, b) (!(((short*)a)[0] == ((short*)b)[0]) | \ + !(((short*)a)[1] == ((short*)b)[1]) | \ + !(((short*)a)[2] == ((short*)b)[2])) + + +#define ether_copy(s, d) { \ + ((short*)d)[0] = ((short*)s)[0]; \ + ((short*)d)[1] = ((short*)s)[1]; \ + ((short*)d)[2] = ((short*)s)[2]; } + + +static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}}; +static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}}; + +#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \ + ((uint8 *)(ea))[1] & \ + ((uint8 *)(ea))[2] & \ + ((uint8 *)(ea))[3] & \ + ((uint8 *)(ea))[4] & \ + ((uint8 *)(ea))[5]) == 0xff) +#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \ + ((uint8 *)(ea))[1] | \ + ((uint8 *)(ea))[2] | \ + ((uint8 *)(ea))[3] | \ + ((uint8 *)(ea))[4] | \ + ((uint8 *)(ea))[5]) == 0) + + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/sdspi.h b/drivers/net/wireless/bcm4329/include/proto/sdspi.h new file mode 100644 index 000000000000..7739e68a2440 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/sdspi.h @@ -0,0 +1,71 @@ +/* + * SD-SPI Protocol Standard + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, 
and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sdspi.h,v 9.1.20.1 2008/05/06 22:59:19 Exp $ + */ + +#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */ +#define SPI_START_S 31 +#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */ +#define SPI_DIR_S 30 +#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */ +#define SPI_CMD_INDEX_S 24 +#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */ +#define SPI_RW_S 23 +#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */ +#define SPI_FUNC_S 20 +#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */ +#define SPI_RAW_S 19 +#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */ +#define SPI_STUFF_S 18 +#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */ +#define SPI_BLKMODE_S 19 +#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */ +#define SPI_OPCODE_S 18 +#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */ +#define SPI_ADDR_S 1 +#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */ +#define SPI_STUFF0_S 0 + +#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */ +#define SPI_RSP_START_S 7 +#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */ +#define SPI_RSP_PARAM_ERR_S 6 +#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */ +#define SPI_RSP_RFU5_S 5 +#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */ +#define SPI_RSP_FUNC_ERR_S 4 +#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */ +#define SPI_RSP_CRC_ERR_S 3 +#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */ +#define SPI_RSP_ILL_CMD_S 2 +#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */ +#define SPI_RSP_RFU1_S 1 +#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */ +#define SPI_RSP_IDLE_S 0 + +/* SD-SPI Protocol Definitions */ +#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */ +#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */ +#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */ +#define SDSPI_START_BIT_MASK 0x80 diff --git a/drivers/net/wireless/bcm4329/include/proto/vlan.h b/drivers/net/wireless/bcm4329/include/proto/vlan.h new file mode 100644 index 000000000000..670bc44c6bd6 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/vlan.h @@ -0,0 +1,63 @@ +/* + * 802.1Q VLAN protocol definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * 
permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: vlan.h,v 9.4.196.2 2008/12/07 21:19:20 Exp $ + */ + + +#ifndef _vlan_h_ +#define _vlan_h_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + + +#include <packed_section_start.h> + +#define VLAN_VID_MASK 0xfff +#define VLAN_CFI_SHIFT 12 +#define VLAN_PRI_SHIFT 13 + +#define VLAN_PRI_MASK 7 + +#define VLAN_TAG_LEN 4 +#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) + +#define VLAN_TPID 0x8100 + +struct ethervlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 vlan_type; + uint16 vlan_tag; + uint16 ether_type; +}; + +#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) + + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/proto/wpa.h b/drivers/net/wireless/bcm4329/include/proto/wpa.h new file mode 100644 index 000000000000..f5d0cd539777 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/proto/wpa.h @@ -0,0 +1,159 @@ +/* + * Fundamental types and constants relating to WPA + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wpa.h,v 1.16.166.1.20.1 2008/11/20 00:51:31 Exp $ + */ + + +#ifndef _proto_wpa_h_ +#define _proto_wpa_h_ + +#include <typedefs.h> +#include <proto/ethernet.h> + + + +#include <packed_section_start.h> + + + + +#define DOT11_RC_INVALID_WPA_IE 13 +#define DOT11_RC_MIC_FAILURE 14 +#define DOT11_RC_4WH_TIMEOUT 15 +#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 +#define DOT11_RC_WPA_IE_MISMATCH 17 +#define DOT11_RC_INVALID_MC_CIPHER 18 +#define DOT11_RC_INVALID_UC_CIPHER 19 +#define DOT11_RC_INVALID_AKMP 20 +#define DOT11_RC_BAD_WPA_VERSION 21 +#define DOT11_RC_INVALID_WPA_CAP 22 +#define DOT11_RC_8021X_AUTH_FAIL 23 + +#define WPA2_PMKID_LEN 16 + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 tag; + uint8 length; + uint8 oui[3]; + uint8 oui_type; + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; +} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t; +#define WPA_IE_OUITYPE_LEN 4 +#define WPA_IE_FIXED_LEN 8 +#define WPA_IE_TAG_FIXED_LEN 6 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 tag; + uint8 length; + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; +} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t; +#define WPA_RSN_IE_FIXED_LEN 4 +#define WPA_RSN_IE_TAG_FIXED_LEN 2 +typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN]; + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 oui[3]; + uint8 type; +} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t; +#define WPA_SUITE_LEN 4 + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_suite_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t; +#define WPA_IE_SUITE_COUNT_LEN 2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_pmkid_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t; + + +#define WPA_CIPHER_NONE 0 +#define WPA_CIPHER_WEP_40 1 +#define WPA_CIPHER_TKIP 2 +#define WPA_CIPHER_AES_OCB 3 +#define WPA_CIPHER_AES_CCM 4 +#define WPA_CIPHER_WEP_104 5 + +#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ + (cipher) == WPA_CIPHER_WEP_40 || \ + (cipher) == WPA_CIPHER_WEP_104 || \ + (cipher) == WPA_CIPHER_TKIP || \ + (cipher) == WPA_CIPHER_AES_OCB || \ + (cipher) == WPA_CIPHER_AES_CCM) + + +#define WPA_TKIP_CM_DETECT 60 +#define WPA_TKIP_CM_BLOCK 60 + + +#define RSN_CAP_LEN 2 + + +#define RSN_CAP_PREAUTH 0x0001 +#define RSN_CAP_NOPAIRWISE 0x0002 +#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C +#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2 +#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030 +#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4 +#define RSN_CAP_1_REPLAY_CNTR 0 +#define RSN_CAP_2_REPLAY_CNTRS 1 +#define RSN_CAP_4_REPLAY_CNTRS 2 +#define RSN_CAP_16_REPLAY_CNTRS 3 + + +#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS +#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS +#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT +#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK + + +#define WPA_CAP_LEN RSN_CAP_LEN + +#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH + + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sbchipc.h b/drivers/net/wireless/bcm4329/include/sbchipc.h new file mode 100644 index 000000000000..39e5c8d6aed0 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbchipc.h @@ -0,0 +1,1026 @@ +/* + * SiliconBackplane Chipcommon core hardware definitions. 
+ * + * The chipcommon core provides chip identification, SB control, + * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, + * gpio interface, extbus, and support for serial and parallel flashes. + * + * $Id: sbchipc.h,v 13.103.2.5.4.5.2.9 2009/07/03 14:23:21 Exp $ + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + */ + + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#ifndef _LANGUAGE_ASSEMBLY + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +typedef volatile struct { + uint32 chipid; + uint32 capabilities; + uint32 corecontrol; + uint32 bist; + + + uint32 otpstatus; + uint32 otpcontrol; + uint32 otpprog; + uint32 PAD; + + + uint32 intstatus; + uint32 intmask; + uint32 chipcontrol; + uint32 chipstatus; + + + uint32 jtagcmd; + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + + uint32 flashcontrol; + uint32 flashaddress; + uint32 flashdata; + uint32 PAD[1]; + + + uint32 broadcastaddress; + uint32 broadcastdata; + + + uint32 gpiopullup; + uint32 gpiopulldown; + uint32 gpioin; + uint32 gpioout; + uint32 gpioouten; + uint32 gpiocontrol; + uint32 gpiointpolarity; + uint32 gpiointmask; + + + uint32 gpioevent; + uint32 gpioeventintmask; + + + uint32 watchdog; + + + uint32 gpioeventintpolarity; + + + uint32 gpiotimerval; + uint32 gpiotimeroutmask; + + + uint32 clockcontrol_n; + uint32 clockcontrol_sb; + uint32 clockcontrol_pci; + uint32 clockcontrol_m2; + uint32 clockcontrol_m3; + uint32 clkdiv; + uint32 PAD[2]; + + + uint32 pll_on_delay; + uint32 fref_sel_delay; + uint32 slow_clk_ctl; + uint32 PAD[1]; + + + uint32 system_clk_ctl; + uint32 clkstatestretch; + uint32 PAD[13]; + + + uint32 eromptr; + + + uint32 pcmcia_config; + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 PAD[4]; + uint32 PAD[40]; + + + + uint32 clk_ctl_st; + uint32 hw_war; + uint32 PAD[70]; + + + uint8 uart0data; + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[248]; + + uint8 uart1data; + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; + uint32 PAD[126]; + + + uint32 
pmucontrol; + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; + uint32 gpioenable; + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; + uint32 chipcontrol_data; + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 PAD[102]; + uint16 otp[768]; +} chipcregs_t; + +#endif + +#define CC_CHIPID 0 +#define CC_CAPABILITIES 4 +#define CC_OTPST 0x10 +#define CC_CHIPST 0x2c +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_WATCHDOG 0x80 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_SYS_CLK_CTL 0xc0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define CC_EROMPTR 0xfc +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 +#define CC_OTP 0x800 + + +#define CID_ID_MASK 0x0000ffff +#define CID_REV_MASK 0x000f0000 +#define CID_REV_SHIFT 16 +#define CID_PKG_MASK 0x00f00000 +#define CID_PKG_SHIFT 20 +#define CID_CC_MASK 0x0f000000 +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 +#define CID_TYPE_SHIFT 28 + + +#define CC_CAP_UARTS_MASK 0x00000003 +#define CC_CAP_MIPSEB 0x00000004 +#define CC_CAP_UCLKSEL 0x00000018 +#define CC_CAP_UINTCLK 0x00000008 +#define CC_CAP_UARTGPIO 0x00000020 +#define CC_CAP_EXTBUS_MASK 0x000000c0 +#define CC_CAP_EXTBUS_NONE 0x00000000 +#define CC_CAP_EXTBUS_FULL 0x00000040 +#define CC_CAP_EXTBUS_PROG 0x00000080 +#define CC_CAP_FLASH_MASK 0x00000700 +#define CC_CAP_PLL_MASK 0x00038000 +#define CC_CAP_PWR_CTL 0x00040000 +#define CC_CAP_OTPSIZE 0x00380000 +#define CC_CAP_OTPSIZE_SHIFT 19 +#define CC_CAP_OTPSIZE_BASE 5 +#define CC_CAP_JTAGP 0x00400000 +#define CC_CAP_ROM 0x00800000 +#define CC_CAP_BKPLN64 0x08000000 +#define CC_CAP_PMU 0x10000000 +#define CC_CAP_ECI 0x20000000 + + +#define PLL_NONE 0x00000000 +#define PLL_TYPE1 0x00010000 +#define PLL_TYPE2 0x00020000 +#define PLL_TYPE3 0x00030000 +#define PLL_TYPE4 0x00008000 +#define PLL_TYPE5 0x00018000 +#define PLL_TYPE6 0x00028000 +#define PLL_TYPE7 0x00038000 + + +#define ILP_CLOCK 32000 + + +#define ALP_CLOCK 20000000 + + +#define HT_CLOCK 80000000 + + +#define CC_UARTCLKO 0x00000001 +#define CC_SE 0x00000002 +#define CC_UARTCLKEN 0x00000008 + + +#define CHIPCTRL_4321A0_DEFAULT 0x3a4 +#define CHIPCTRL_4321A1_DEFAULT 0x0a4 +#define CHIPCTRL_4321_PLL_DOWN 0x800000 + + +#define OTPS_OL_MASK 0x000000ff +#define OTPS_OL_MFG 0x00000001 +#define OTPS_OL_OR1 0x00000002 +#define OTPS_OL_OR2 0x00000004 +#define OTPS_OL_GU 0x00000008 +#define OTPS_GUP_MASK 0x00000f00 +#define OTPS_GUP_SHIFT 8 +#define OTPS_GUP_HW 0x00000100 +#define OTPS_GUP_SW 0x00000200 +#define OTPS_GUP_CI 0x00000400 +#define OTPS_GUP_FUSE 0x00000800 +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) +#define OTPS_RV_MASK 0x0fff0000 + + +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 
+#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + + +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 + + +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + + +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 +#define JCMD_STATE_PIR 0x20000000 +#define JCMD_STATE_PDR 0x40000000 +#define JCMD_STATE_RTI 0x60000000 +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 +#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 +#define JCMD_ACC_DR_I 0x00080000 +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + + +#define JCTRL_FORCE_CLK 4 +#define JCTRL_EXT_EN 2 +#define JCTRL_EN 1 + + +#define CLKD_SFLASH 0x0f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + + +#define CI_GPIO 0x00000001 +#define CI_EI 0x00000002 +#define CI_TEMP 0x00000004 +#define CI_SIRQ 0x00000008 +#define CI_ECI 0x00000010 +#define CI_PMU 0x00000020 +#define CI_UART 0x00000040 +#define CI_WDRESET 0x80000000 + + +#define SCC_SS_MASK 0x00000007 +#define SCC_SS_LPO 0x00000000 +#define SCC_SS_XTAL 0x00000001 +#define SCC_SS_PCI 0x00000002 +#define SCC_LF 0x00000200 +#define SCC_LP 0x00000400 +#define SCC_FS 0x00000800 +#define SCC_IP 0x00001000 +#define SCC_XC 0x00002000 +#define SCC_XP 0x00004000 +#define SCC_CD_MASK 0xffff0000 +#define SCC_CD_SHIFT 16 + + +#define SYCC_IE 0x00000001 +#define SYCC_AE 0x00000002 +#define SYCC_FP 0x00000004 +#define SYCC_AR 0x00000008 +#define SYCC_HR 0x00000010 +#define SYCC_CD_MASK 0xffff0000 +#define SYCC_CD_SHIFT 16 + + +#define CF_EN 0x00000001 +#define CF_EM_MASK 0x0000000e +#define CF_EM_SHIFT 1 +#define CF_EM_FLASH 0 +#define CF_EM_SYNC 2 +#define CF_EM_PCMCIA 4 +#define CF_DS 0x00000010 +#define CF_BS 0x00000020 +#define CF_CD_MASK 0x000000c0 +#define CF_CD_SHIFT 6 +#define CF_CD_DIV2 0x00000000 +#define CF_CD_DIV3 0x00000040 +#define CF_CD_DIV4 0x00000080 +#define CF_CE 0x00000100 +#define CF_SB 0x00000200 + + +#define PM_W0_MASK 0x0000003f +#define PM_W1_MASK 0x00001f00 +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 +#define PM_W3_SHIFT 24 + + +#define PA_W0_MASK 0x0000003f +#define PA_W1_MASK 0x00001f00 +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 +#define PA_W3_SHIFT 24 + + 
+#define PI_W0_MASK 0x0000003f +#define PI_W1_MASK 0x00001f00 +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 0x001f0000 +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 +#define PI_W3_SHIFT 24 + + +#define PW_W0_MASK 0x0000001f +#define PW_W1_MASK 0x00001f00 +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + + +#define FW_W0_MASK 0x0000003f +#define FW_W1_MASK 0x00001f00 +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 +#define FW_W3_SHIFT 24 + + +#define WATCHDOG_CLOCK 48000000 +#define WATCHDOG_CLOCK_5354 32000 + + +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_PLL_PLLCTL_UPD 0x00000400 +#define PCTL_NOILP_ON_WAIT 0x00000200 +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + + +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff + + +#define GPIO_ONTIME_SHIFT 16 + + +#define CN_N1_MASK 0x3f +#define CN_N2_MASK 0x3f00 +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 +#define CN_PLLC_SHIFT 16 + + +#define CC_M1_MASK 0x3f +#define CC_M2_MASK 0x3f00 +#define CC_M2_SHIFT 8 +#define CC_M3_MASK 0x3f0000 +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 +#define CC_MC_SHIFT 24 + + +#define CC_F6_2 0x02 +#define CC_F6_3 0x03 +#define CC_F6_4 0x05 +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + + +#define CC_T2_BIAS 2 +#define CC_T2M2_BIAS 3 + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + + +#define CC_T6_MMASK 1 +#define CC_T6_M0 120000000 +#define CC_T6_M1 100000000 +#define SB2MIPS_T6(sb) (2 * (sb)) + + +#define CC_CLOCK_BASE1 24000000 +#define CC_CLOCK_BASE2 12500000 + + +#define CLKC_5350_N 0x0311 +#define CLKC_5350_M 0x04020009 + + +#define FLASH_NONE 0x000 +#define SFLASH_ST 0x100 +#define SFLASH_AT 0x200 +#define PFLASH 0x700 + + +#define CC_CFG_EN 0x0001 +#define CC_CFG_EM_MASK 0x000e +#define CC_CFG_EM_ASYNC 0x0000 +#define CC_CFG_EM_SYNC 0x0002 +#define CC_CFG_EM_PCMCIA 0x0004 +#define CC_CFG_EM_IDE 0x0006 +#define CC_CFG_DS 0x0010 +#define CC_CFG_CD_MASK 0x00e0 +#define CC_CFG_CE 0x0100 +#define CC_CFG_SB 0x0200 +#define CC_CFG_IS 0x0400 + + +#define CC_EB_BASE 0x1a000000 +#define CC_EB_PCMCIA_MEM 0x1a000000 +#define CC_EB_PCMCIA_IO 0x1a200000 +#define CC_EB_PCMCIA_CFG 0x1a400000 +#define CC_EB_IDE 0x1a800000 +#define CC_EB_PCMCIA1_MEM 0x1a800000 +#define CC_EB_PCMCIA1_IO 0x1aa00000 +#define CC_EB_PCMCIA1_CFG 0x1ac00000 +#define CC_EB_PROGIF 0x1b000000 + + + +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + + +#define SFLASH_ACT_OPONLY 0x0000 +#define SFLASH_ACT_OP1D 0x0100 +#define SFLASH_ACT_OP3A 0x0200 +#define SFLASH_ACT_OP3A1D 0x0300 +#define SFLASH_ACT_OP3A4D 0x0400 +#define SFLASH_ACT_OP3A4X4D 0x0500 +#define SFLASH_ACT_OP3A1X4D 0x0700 + + +#define SFLASH_ST_WREN 0x0006 +#define SFLASH_ST_WRDIS 0x0004 +#define SFLASH_ST_RDSR 0x0105 +#define SFLASH_ST_WRSR 0x0101 +#define SFLASH_ST_READ 0x0303 
+#define SFLASH_ST_PP 0x0302 +#define SFLASH_ST_SE 0x02d8 +#define SFLASH_ST_BE 0x00c7 +#define SFLASH_ST_DP 0x00b9 +#define SFLASH_ST_RES 0x03ab +#define SFLASH_ST_CSA 0x1000 + + +#define SFLASH_ST_WIP 0x01 +#define SFLASH_ST_WEL 0x02 +#define SFLASH_ST_BP_MASK 0x1c +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 + + +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +#define SFLASH_AT_BUF1_READ +#define SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 +#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + + +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + + + +#define UART_RX 0 +#define UART_TX 0 +#define UART_DLL 0 +#define UART_IER 1 +#define UART_DLM 1 +#define UART_IIR 2 +#define UART_FCR 2 +#define UART_LCR 3 +#define UART_MCR 4 +#define UART_LSR 5 +#define UART_MSR 6 +#define UART_SCR 7 +#define UART_LCR_DLAB 0x80 +#define UART_LCR_WLEN8 0x03 +#define UART_MCR_OUT2 0x08 +#define UART_MCR_LOOP 0x10 +#define UART_LSR_RX_FIFO 0x80 +#define UART_LSR_TDHR 0x40 +#define UART_LSR_THRE 0x20 +#define UART_LSR_BREAK 0x10 +#define UART_LSR_FRAMING 0x08 +#define UART_LSR_PARITY 0x04 +#define UART_LSR_OVERRUN 0x02 +#define UART_LSR_RXRDY 0x01 +#define UART_FCR_FIFO_ENABLE 1 + + +#define UART_IIR_FIFO_MASK 0xc0 +#define UART_IIR_INT_MASK 0xf +#define UART_IIR_MDM_CHG 0x0 +#define UART_IIR_NOINT 0x1 +#define UART_IIR_THRE 0x2 +#define UART_IIR_RCVD_DATA 0x4 +#define UART_IIR_RCVR_STATUS 0x6 +#define UART_IIR_CHAR_TIME 0xc + + +#define UART_IER_EDSSI 8 +#define UART_IER_ELSI 4 +#define UART_IER_ETBEI 2 +#define UART_IER_ERBFI 1 + + +#define PST_INTPEND 0x0040 +#define PST_SBCLKST 0x0030 +#define PST_SBCLKST_ILP 0x0010 +#define PST_SBCLKST_ALP 0x0020 +#define PST_SBCLKST_HT 0x0030 +#define PST_ALPAVAIL 0x0008 +#define PST_HTAVAIL 0x0004 +#define PST_RESINIT 0x0003 + + +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 + + + +#define PRRT_TIME_MASK 0x03ff +#define PRRT_INTEN 0x0400 +#define PRRT_REQ_ACTIVE 0x0800 +#define PRRT_ALP_REQ 0x1000 +#define PRRT_HT_REQ 0x2000 + + +#define PMURES_BIT(bit) (1 << (bit)) + + +#define PMURES_MAX_RESNUM 30 + + + + +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + + +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 
1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + + +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + + +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + + +#define RES4328_EXT_SWITCHER_PWM 0 +#define RES4328_BB_SWITCHER_PWM 1 +#define RES4328_BB_SWITCHER_BURST 2 +#define RES4328_BB_EXT_SWITCHER_BURST 3 +#define RES4328_ILP_REQUEST 4 +#define RES4328_RADIO_SWITCHER_PWM 5 +#define RES4328_RADIO_SWITCHER_BURST 6 +#define RES4328_ROM_SWITCH 7 +#define RES4328_PA_REF_LDO 8 +#define RES4328_RADIO_LDO 9 +#define RES4328_AFE_LDO 10 +#define RES4328_PLL_LDO 11 +#define RES4328_BG_FILTBYP 12 +#define RES4328_TX_FILTBYP 13 +#define RES4328_RX_FILTBYP 14 +#define RES4328_XTAL_PU 15 +#define RES4328_XTAL_EN 16 +#define RES4328_BB_PLL_FILTBYP 17 +#define RES4328_RF_PLL_FILTBYP 18 +#define RES4328_BB_PLL_PU 19 + +#define RES5354_EXT_SWITCHER_PWM 0 +#define RES5354_BB_SWITCHER_PWM 1 +#define RES5354_BB_SWITCHER_BURST 2 +#define RES5354_BB_EXT_SWITCHER_BURST 3 +#define RES5354_ILP_REQUEST 4 +#define RES5354_RADIO_SWITCHER_PWM 5 +#define RES5354_RADIO_SWITCHER_BURST 6 +#define RES5354_ROM_SWITCH 7 +#define RES5354_PA_REF_LDO 8 +#define RES5354_RADIO_LDO 9 +#define RES5354_AFE_LDO 10 +#define RES5354_PLL_LDO 11 +#define RES5354_BG_FILTBYP 12 +#define RES5354_TX_FILTBYP 13 +#define RES5354_RX_FILTBYP 14 +#define RES5354_XTAL_PU 15 +#define RES5354_XTAL_EN 16 +#define RES5354_BB_PLL_FILTBYP 17 +#define RES5354_RF_PLL_FILTBYP 18 +#define RES5354_BB_PLL_PU 19 + + + +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + + +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + + +#define RES4325_BUCK_BOOST_BURST 0 +#define RES4325_CBUCK_BURST 1 +#define RES4325_CBUCK_PWM 2 +#define RES4325_CLDO_CBUCK_BURST 3 +#define RES4325_CLDO_CBUCK_PWM 4 +#define RES4325_BUCK_BOOST_PWM 5 +#define RES4325_ILP_REQUEST 6 +#define RES4325_ABUCK_BURST 7 +#define RES4325_ABUCK_PWM 8 +#define RES4325_LNLDO1_PU 9 +#define RES4325_OTP_PU 10 +#define RES4325_LNLDO3_PU 11 +#define RES4325_LNLDO4_PU 12 +#define RES4325_XTAL_PU 13 +#define RES4325_ALP_AVAIL 14 +#define RES4325_RX_PWRSW_PU 15 +#define RES4325_TX_PWRSW_PU 16 +#define RES4325_RFPLL_PWRSW_PU 17 +#define RES4325_LOGEN_PWRSW_PU 18 +#define RES4325_AFE_PWRSW_PU 19 +#define RES4325_BBPLL_PWRSW_PU 20 +#define RES4325_HT_AVAIL 21 + + +#define RES4325B0_CBUCK_LPOM 1 +#define RES4325B0_CBUCK_BURST 2 +#define RES4325B0_CBUCK_PWM 3 +#define RES4325B0_CLDO_PU 4 + + +#define RES4325C1_OTP_PWRSW_PU 10 +#define RES4325C1_LNLDO2_PU 12 + + +#define CST4325_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4325_DEFCIS_SEL 0 +#define CST4325_SPROM_SEL 1 +#define CST4325_OTP_SEL 2 +#define CST4325_OTP_PWRDN 3 +#define CST4325_SDIO_USB_MODE_MASK 0x00000004 +#define CST4325_SDIO_USB_MODE_SHIFT 2 +#define CST4325_RCAL_VALID_MASK 0x00000008 +#define CST4325_RCAL_VALID_SHIFT 3 +#define CST4325_RCAL_VALUE_MASK 0x000001f0 +#define CST4325_RCAL_VALUE_SHIFT 4 +#define CST4325_PMUTOP_2B_MASK 
0x00000200 +#define CST4325_PMUTOP_2B_SHIFT 9 + +#define RES4329_RESERVED0 0 +#define RES4329_CBUCK_LPOM 1 +#define RES4329_CBUCK_BURST 2 +#define RES4329_CBUCK_PWM 3 +#define RES4329_CLDO_PU 4 +#define RES4329_PALDO_PU 5 +#define RES4329_ILP_REQUEST 6 +#define RES4329_RESERVED7 7 +#define RES4329_RESERVED8 8 +#define RES4329_LNLDO1_PU 9 +#define RES4329_OTP_PU 10 +#define RES4329_RESERVED11 11 +#define RES4329_LNLDO2_PU 12 +#define RES4329_XTAL_PU 13 +#define RES4329_ALP_AVAIL 14 +#define RES4329_RX_PWRSW_PU 15 +#define RES4329_TX_PWRSW_PU 16 +#define RES4329_RFPLL_PWRSW_PU 17 +#define RES4329_LOGEN_PWRSW_PU 18 +#define RES4329_AFE_PWRSW_PU 19 +#define RES4329_BBPLL_PWRSW_PU 20 +#define RES4329_HT_AVAIL 21 + +#define CST4329_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4329_DEFCIS_SEL 0 +#define CST4329_SPROM_SEL 1 +#define CST4329_OTP_SEL 2 +#define CST4329_OTP_PWRDN 3 +#define CST4329_SPI_SDIO_MODE_MASK 0x00000004 +#define CST4329_SPI_SDIO_MODE_SHIFT 2 + + +#define RES4312_SWITCHER_BURST 0 +#define RES4312_SWITCHER_PWM 1 +#define RES4312_PA_REF_LDO 2 +#define RES4312_CORE_LDO_BURST 3 +#define RES4312_CORE_LDO_PWM 4 +#define RES4312_RADIO_LDO 5 +#define RES4312_ILP_REQUEST 6 +#define RES4312_BG_FILTBYP 7 +#define RES4312_TX_FILTBYP 8 +#define RES4312_RX_FILTBYP 9 +#define RES4312_XTAL_PU 10 +#define RES4312_ALP_AVAIL 11 +#define RES4312_BB_PLL_FILTBYP 12 +#define RES4312_RF_PLL_FILTBYP 13 +#define RES4312_HT_AVAIL 14 + +#define RES4322_RF_LDO 0 +#define RES4322_ILP_REQUEST 1 +#define RES4322_XTAL_PU 2 +#define RES4322_ALP_AVAIL 3 +#define RES4322_SI_PLL_ON 4 +#define RES4322_HT_SI_AVAIL 5 +#define RES4322_PHY_PLL_ON 6 +#define RES4322_HT_PHY_AVAIL 7 +#define RES4322_OTP_PU 8 + + +#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020 +#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0 +#define CST4322_SPROM_OTP_SEL_SHIFT 6 +#define CST4322_NO_SPROM_OTP 0 +#define CST4322_SPROM_PRESENT 1 +#define CST4322_OTP_PRESENT 2 +#define CST4322_PCI_OR_USB 0x00000100 +#define CST4322_BOOT_MASK 0x00000600 +#define CST4322_BOOT_SHIFT 9 +#define CST4322_BOOT_FROM_SRAM 0 +#define CST4322_BOOT_FROM_ROM 1 +#define CST4322_BOOT_FROM_FLASH 2 +#define CST4322_BOOT_FROM_INVALID 3 +#define CST4322_ILP_DIV_EN 0x00000800 +#define CST4322_FLASH_TYPE_MASK 0x00001000 +#define CST4322_FLASH_TYPE_SHIFT 12 +#define CST4322_FLASH_TYPE_SHIFT_ST 0 +#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 +#define CST4322_ARM_TAP_SEL 0x00002000 +#define CST4322_RES_INIT_MODE_MASK 0x0000c000 +#define CST4322_RES_INIT_MODE_SHIFT 14 +#define CST4322_RES_INIT_MODE_ILPAVAIL 0 +#define CST4322_RES_INIT_MODE_ILPREQ 1 +#define CST4322_RES_INIT_MODE_ALPAVAIL 2 +#define CST4322_RES_INIT_MODE_HTAVAIL 3 +#define CST4322_PCIPLLCLK_GATING 0x00010000 +#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000 +#define CST4322_PCI_CARDBUS_MODE 0x00040000 + +#define RES4315_CBUCK_LPOM 1 +#define RES4315_CBUCK_BURST 2 +#define RES4315_CBUCK_PWM 3 +#define RES4315_CLDO_PU 4 +#define RES4315_PALDO_PU 5 +#define RES4315_ILP_REQUEST 6 +#define RES4315_LNLDO1_PU 9 +#define RES4315_OTP_PU 10 +#define RES4315_LNLDO2_PU 12 +#define RES4315_XTAL_PU 13 +#define RES4315_ALP_AVAIL 14 +#define RES4315_RX_PWRSW_PU 15 +#define RES4315_TX_PWRSW_PU 16 +#define RES4315_RFPLL_PWRSW_PU 17 +#define RES4315_LOGEN_PWRSW_PU 18 +#define RES4315_AFE_PWRSW_PU 19 +#define RES4315_BBPLL_PWRSW_PU 20 +#define RES4315_HT_AVAIL 21 + +#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4315_DEFCIS_SEL 0x00000000 +#define CST4315_SPROM_SEL 0x00000001 +#define CST4315_OTP_SEL 0x00000002 +#define 
CST4315_OTP_PWRDN 0x00000003 +#define CST4315_SDIO_MODE 0x00000004 +#define CST4315_RCAL_VALID 0x00000008 +#define CST4315_RCAL_VALUE_MASK 0x000001f0 +#define CST4315_RCAL_VALUE_SHIFT 4 +#define CST4315_PALDO_EXTPNP 0x00000200 +#define CST4315_CBUCK_MODE_MASK 0x00000c00 +#define CST4315_CBUCK_MODE_BURST 0x00000400 +#define CST4315_CBUCK_MODE_LPBURST 0x00000c00 + +#define PMU_MAX_TRANSITION_DLY 15000 + + +#define PMURES_UP_TRANSITION 2 + + + + + +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + + +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sbconfig.h b/drivers/net/wireless/bcm4329/include/sbconfig.h new file mode 100644 index 000000000000..da18ccbe9ab8 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbconfig.h @@ -0,0 +1,276 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbconfig.h,v 13.67.30.1 2008/05/07 20:17:27 Exp $ + */ + + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + + +#define SB_BUS_SIZE 0x10000 +#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) + + +#define SBCONFIGOFF 0xf00 +#define SBCONFIGSIZE 256 + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 +#define SBTMERRLOG 0x50 +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + + + +#define SBIMERRLOGA 0xea8 +#define SBIMERRLOG 0xeb0 +#define SBTMPORTCONNID0 0xed8 +#define SBTMPORTLOCK0 0xef8 + +#ifndef _LANGUAGE_ASSEMBLY + +typedef volatile struct _sbconfig { + uint32 PAD[2]; + uint32 sbipsflag; + uint32 PAD[3]; + uint32 sbtpsflag; + uint32 PAD[11]; + uint32 sbtmerrloga; + uint32 PAD; + uint32 sbtmerrlog; + uint32 PAD[3]; + uint32 sbadmatch3; + uint32 PAD; + uint32 sbadmatch2; + uint32 PAD; + uint32 sbadmatch1; + uint32 PAD[7]; + uint32 sbimstate; + uint32 sbintvec; + uint32 sbtmstatelow; + uint32 sbtmstatehigh; + uint32 sbbwa0; + uint32 PAD; + uint32 sbimconfiglow; + uint32 sbimconfighigh; + uint32 sbadmatch0; + uint32 PAD; + uint32 sbtmconfiglow; + uint32 sbtmconfighigh; + uint32 sbbconfig; + uint32 PAD; + uint32 sbbstate; + uint32 PAD[3]; + uint32 sbactcnfg; + uint32 PAD[3]; + uint32 sbflagst; + uint32 PAD[3]; + uint32 sbidlow; + uint32 sbidhigh; +} sbconfig_t; + +#endif + + +#define SBIPS_INT1_MASK 0x3f +#define SBIPS_INT1_SHIFT 0 +#define SBIPS_INT2_MASK 0x3f00 +#define SBIPS_INT2_SHIFT 8 +#define SBIPS_INT3_MASK 0x3f0000 +#define SBIPS_INT3_SHIFT 16 +#define SBIPS_INT4_MASK 0x3f000000 +#define SBIPS_INT4_SHIFT 24 + + +#define SBTPS_NUM0_MASK 0x3f +#define SBTPS_F0EN0 0x40 + + +#define SBTMEL_CM 0x00000007 +#define SBTMEL_CI 0x0000ff00 +#define SBTMEL_EC 0x0f000000 +#define SBTMEL_ME 0x80000000 + + +#define SBIM_PC 0xf +#define SBIM_AP_MASK 0x30 +#define SBIM_AP_BOTH 0x00 +#define SBIM_AP_TS 0x10 +#define SBIM_AP_TK 0x20 +#define SBIM_AP_RSV 0x30 +#define SBIM_IBE 0x20000 +#define SBIM_TO 0x40000 +#define SBIM_BY 0x01800000 +#define SBIM_RJ 0x02000000 + + +#define SBTML_RESET 0x0001 +#define SBTML_REJ_MASK 0x0006 +#define SBTML_REJ 0x0002 +#define SBTML_TMPREJ 0x0004 + +#define SBTML_SICF_SHIFT 16 + + +#define SBTMH_SERR 0x0001 +#define SBTMH_INT 0x0002 +#define SBTMH_BUSY 0x0004 +#define SBTMH_TO 0x0020 + +#define SBTMH_SISF_SHIFT 16 + + +#define SBBWA_TAB0_MASK 0xffff +#define SBBWA_TAB1_MASK 0xffff +#define SBBWA_TAB1_SHIFT 16 + + +#define SBIMCL_STO_MASK 0x7 +#define SBIMCL_RTO_MASK 0x70 +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 +#define SBIMCL_CID_SHIFT 16 + + +#define SBIMCH_IEM_MASK 0xc +#define SBIMCH_TEM_MASK 0x30 +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 +#define SBIMCH_BEM_SHIFT 6 + + +#define SBAM_TYPE_MASK 0x3 +#define SBAM_AD64 0x4 +#define SBAM_ADINT0_MASK 0xf8 +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 +#define 
SBAM_ADINT2_SHIFT 3 +#define SBAM_ADEN 0x400 +#define SBAM_ADNEG 0x800 +#define SBAM_BASE0_MASK 0xffffff00 +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 +#define SBAM_BASE2_SHIFT 16 + + +#define SBTMCL_CD_MASK 0xff +#define SBTMCL_CO_MASK 0xf800 +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 +#define SBTMCL_IM_SHIFT 24 + + +#define SBTMCH_BM_MASK 0x3 +#define SBTMCH_RM_MASK 0x3 +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 +#define SBTMCH_IM_SHIFT 10 + + +#define SBBC_LAT_MASK 0x3 +#define SBBC_MAX0_MASK 0xf0000 +#define SBBC_MAX0_SHIFT 16 +#define SBBC_MAX1_MASK 0xf00000 +#define SBBC_MAX1_SHIFT 20 + + +#define SBBS_SRD 0x1 +#define SBBS_HRD 0x2 + + +#define SBIDL_CS_MASK 0x3 +#define SBIDL_AR_MASK 0x38 +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 +#define SBIDL_INIT 0x80 +#define SBIDL_MINLAT_MASK 0xf00 +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 +#define SBIDL_CW_MASK 0xc0000 +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 +#define SBIDL_RV_2_3 0x10000000 + + +#define SBIDH_RC_MASK 0x000f +#define SBIDH_RCE_MASK 0x7000 +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 + + +#define SB_VEND_BCM 0x4243 + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sbhnddma.h b/drivers/net/wireless/bcm4329/include/sbhnddma.h new file mode 100644 index 000000000000..7681395f5b3b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbhnddma.h @@ -0,0 +1,294 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbhnddma.h,v 13.11.250.5.16.1 2009/07/21 14:04:51 Exp $ + */ + + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + + + + + + + +typedef volatile struct { + uint32 control; + uint32 addr; + uint32 ptr; + uint32 status; +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; + dma32regs_t rcv; +} dma32regp_t; + +typedef volatile struct { + uint32 fifoaddr; + uint32 fifodatalow; + uint32 fifodatahigh; + uint32 pad; +} dma32diag_t; + + +typedef volatile struct { + uint32 ctrl; + uint32 addr; +} dma32dd_t; + + +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + + +#define XC_XE ((uint32)1 << 0) +#define XC_SE ((uint32)1 << 1) +#define XC_LE ((uint32)1 << 2) +#define XC_FL ((uint32)1 << 4) +#define XC_PD ((uint32)1 << 11) +#define XC_AE ((uint32)3 << 16) +#define XC_AE_SHIFT 16 + + +#define XP_LD_MASK 0xfff + + +#define XS_CD_MASK 0x0fff +#define XS_XS_MASK 0xf000 +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 +#define XS_XS_ACTIVE 0x1000 +#define XS_XS_IDLE 0x2000 +#define XS_XS_STOPPED 0x3000 +#define XS_XS_SUSP 0x4000 +#define XS_XE_MASK 0xf0000 +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 +#define XS_XE_DPE 0x10000 +#define XS_XE_DFU 0x20000 +#define XS_XE_BEBR 0x30000 +#define XS_XE_BEDA 0x40000 +#define XS_AD_MASK 0xfff00000 +#define XS_AD_SHIFT 20 + + +#define RC_RE ((uint32)1 << 0) +#define RC_RO_MASK 0xfe +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) +#define RC_SH ((uint32)1 << 9) +#define RC_OC ((uint32)1 << 10) +#define RC_PD ((uint32)1 << 11) +#define RC_AE ((uint32)3 << 16) +#define RC_AE_SHIFT 16 + + +#define RP_LD_MASK 0xfff + + +#define RS_CD_MASK 0x0fff +#define RS_RS_MASK 0xf000 +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 +#define RS_RS_ACTIVE 0x1000 +#define RS_RS_IDLE 0x2000 +#define RS_RS_STOPPED 0x3000 +#define RS_RE_MASK 0xf0000 +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 +#define RS_RE_DPE 0x10000 +#define RS_RE_DFO 0x20000 +#define RS_RE_BEBW 0x30000 +#define RS_RE_BEDA 0x40000 +#define RS_AD_MASK 0xfff00000 +#define RS_AD_SHIFT 20 + + +#define FA_OFF_MASK 0xffff +#define FA_SEL_MASK 0xf0000 +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 +#define FA_SEL_XDP 0x10000 +#define FA_SEL_RDD 0x40000 +#define FA_SEL_RDP 0x50000 +#define FA_SEL_XFD 0x80000 +#define FA_SEL_XFP 0x90000 +#define FA_SEL_RFD 0xc0000 +#define FA_SEL_RFP 0xd0000 +#define FA_SEL_RSD 0xe0000 +#define FA_SEL_RSP 0xf0000 + + +#define CTRL_BC_MASK 0x1fff +#define CTRL_AE ((uint32)3 << 16) +#define CTRL_AE_SHIFT 16 +#define CTRL_EOT ((uint32)1 << 28) +#define CTRL_IOC ((uint32)1 << 29) +#define CTRL_EOF ((uint32)1 << 30) +#define CTRL_SOF ((uint32)1 << 31) + + +#define CTRL_CORE_MASK 0x0ff00000 + + + + +typedef volatile struct { + uint32 control; + uint32 ptr; + uint32 addrlow; + uint32 addrhigh; + uint32 status0; + uint32 status1; +} dma64regs_t; + +typedef volatile struct { + dma64regs_t tx; + dma64regs_t rx; +} dma64regp_t; + +typedef volatile struct { + uint32 fifoaddr; + uint32 fifodatalow; + uint32 fifodatahigh; + uint32 pad; +} dma64diag_t; + + +typedef volatile struct { + uint32 ctrl1; + uint32 ctrl2; + uint32 addrlow; + uint32 addrhigh; +} dma64dd_t; + + +#define D64RINGALIGN_BITS 13 +#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) +#define D64RINGALIGN (1 << D64RINGALIGN_BITS) +#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) + + +#define D64_XC_XE 0x00000001 +#define D64_XC_SE 0x00000002 +#define 
D64_XC_LE 0x00000004 +#define D64_XC_FL 0x00000010 +#define D64_XC_PD 0x00000800 +#define D64_XC_AE 0x00030000 +#define D64_XC_AE_SHIFT 16 + + +#define D64_XP_LD_MASK 0x00000fff + + +#define D64_XS0_CD_MASK 0x00001fff +#define D64_XS0_XS_MASK 0xf0000000 +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 +#define D64_XS0_XS_ACTIVE 0x10000000 +#define D64_XS0_XS_IDLE 0x20000000 +#define D64_XS0_XS_STOPPED 0x30000000 +#define D64_XS0_XS_SUSP 0x40000000 + +#define D64_XS1_AD_MASK 0x0001ffff +#define D64_XS1_XE_MASK 0xf0000000 +#define D64_XS1_XE_SHIFT 28 +#define D64_XS1_XE_NOERR 0x00000000 +#define D64_XS1_XE_DPE 0x10000000 +#define D64_XS1_XE_DFU 0x20000000 +#define D64_XS1_XE_DTE 0x30000000 +#define D64_XS1_XE_DESRE 0x40000000 +#define D64_XS1_XE_COREE 0x50000000 + + +#define D64_RC_RE 0x00000001 +#define D64_RC_RO_MASK 0x000000fe +#define D64_RC_RO_SHIFT 1 +#define D64_RC_FM 0x00000100 +#define D64_RC_SH 0x00000200 +#define D64_RC_OC 0x00000400 +#define D64_RC_PD 0x00000800 +#define D64_RC_AE 0x00030000 +#define D64_RC_AE_SHIFT 16 + + +#define D64_RP_LD_MASK 0x00000fff + + +#define D64_RS0_CD_MASK 0x00001fff +#define D64_RS0_RS_MASK 0xf0000000 +#define D64_RS0_RS_SHIFT 28 +#define D64_RS0_RS_DISABLED 0x00000000 +#define D64_RS0_RS_ACTIVE 0x10000000 +#define D64_RS0_RS_IDLE 0x20000000 +#define D64_RS0_RS_STOPPED 0x30000000 +#define D64_RS0_RS_SUSP 0x40000000 + +#define D64_RS1_AD_MASK 0x0001ffff +#define D64_RS1_RE_MASK 0xf0000000 +#define D64_RS1_RE_SHIFT 28 +#define D64_RS1_RE_NOERR 0x00000000 +#define D64_RS1_RE_DPO 0x10000000 +#define D64_RS1_RE_DFU 0x20000000 +#define D64_RS1_RE_DTE 0x30000000 +#define D64_RS1_RE_DESRE 0x40000000 +#define D64_RS1_RE_COREE 0x50000000 + + +#define D64_FA_OFF_MASK 0xffff +#define D64_FA_SEL_MASK 0xf0000 +#define D64_FA_SEL_SHIFT 16 +#define D64_FA_SEL_XDD 0x00000 +#define D64_FA_SEL_XDP 0x10000 +#define D64_FA_SEL_RDD 0x40000 +#define D64_FA_SEL_RDP 0x50000 +#define D64_FA_SEL_XFD 0x80000 +#define D64_FA_SEL_XFP 0x90000 +#define D64_FA_SEL_RFD 0xc0000 +#define D64_FA_SEL_RFP 0xd0000 +#define D64_FA_SEL_RSD 0xe0000 +#define D64_FA_SEL_RSP 0xf0000 + + +#define D64_CTRL1_EOT ((uint32)1 << 28) +#define D64_CTRL1_IOC ((uint32)1 << 29) +#define D64_CTRL1_EOF ((uint32)1 << 30) +#define D64_CTRL1_SOF ((uint32)1 << 31) + + +#define D64_CTRL2_BC_MASK 0x00007fff +#define D64_CTRL2_AE 0x00030000 +#define D64_CTRL2_AE_SHIFT 16 +#define D64_CTRL2_PARITY 0x00040000 + + +#define D64_CTRL_CORE_MASK 0x0ff00000 + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sbpcmcia.h b/drivers/net/wireless/bcm4329/include/sbpcmcia.h new file mode 100644 index 000000000000..d6d80334258a --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbpcmcia.h @@ -0,0 +1,109 @@ +/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbpcmcia.h,v 13.31.4.1.2.3.8.7 2009/06/22 05:14:24 Exp $ + */ + + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + + + + +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + + + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + + +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + + +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e / 2) +#define SROM_INFO2 (0x0772 / 2) +#define SROM_INFO (0x07be / 2) + + +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + + +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + + + +#define SBTML_INT_ACK 0x40000 +#define SBTML_INT_EN 0x20000 + + +#define SBTMH_INT_STATUS 0x40000 + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sbsdio.h b/drivers/net/wireless/bcm4329/include/sbsdio.h new file mode 100644 index 000000000000..75aaf4d88f7d --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbsdio.h @@ -0,0 +1,166 @@ +/* + * SDIO device core hardware definitions. + * sdio is a portion of the pcmcia core in core rev 3 - rev 8 + * + * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbsdio.h,v 13.29.4.1.22.3 2009/03/11 20:26:57 Exp $ + */ + +#ifndef _SBSDIO_H +#define _SBSDIO_H + +#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */ + +/* function 1 miscellaneous registers */ +#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */ +#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 0 */ +#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */ +#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */ +#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */ +#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */ + +/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */ + +#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ +#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */ + +/* SBSDIO_SPROM_CS */ +#define SBSDIO_SPROM_IDLE 0 +#define SBSDIO_SPROM_WRITE 1 +#define SBSDIO_SPROM_READ 2 +#define SBSDIO_SPROM_WEN 4 +#define SBSDIO_SPROM_WDS 7 +#define SBSDIO_SPROM_DONE 8 + +/* SBSDIO_SPROM_INFO */ +#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */ +#define SROM_BLANK 0x04 /* depreciated in corerev 6 */ +#define SROM_OTP 0x80 /* OTP present */ + +/* SBSDIO_CHIP_CTRL */ +#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip 
xtal_pu, + * 1: power on oscillator + * (for 4318 only) + */ +/* SBSDIO_WATERMARK */ +#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device + * to wait before sending data to host + */ + +/* SBSDIO_DEVICE_CTL */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when + * receiving CMD53 + */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is + * synchronous to the sdio clock + */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host + * except the chipActive (rev 8) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put + * external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */ +#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */ +#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */ +#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */ + + +/* SBSDIO_FUNC1_CHIPCLKCSR */ +#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */ +#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */ +#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */ +#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */ +/* In rev8, actual avail bits followed original docs */ +#define SBSDIO_Rev8_HT_AVAIL 0x40 +#define SBSDIO_Rev8_ALP_AVAIL 0x80 + +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \ + (alponly ? 
1 : SBSDIO_HTAV(regval))) + +/* SBSDIO_FUNC1_SDIOPULLUP */ +#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */ +#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */ +#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */ +#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */ +#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */ + +/* function 1 OCP space */ +#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */ +#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000 +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */ + +/* some duplication with sbsdpcmdev.h here */ +/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ +#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */ +#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */ +#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */ +#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */ + +/* direct(mapped) cis space */ +#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */ +#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ +#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */ + +#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */ + +#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple, + * link bytes + */ + +/* indirect cis access (in sprom) */ +#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from + * 8th byte + */ + +#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one + * data comamnd + */ + +#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */ + +#endif /* _SBSDIO_H */ diff --git a/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h new file mode 100644 index 000000000000..7c7c7e4de0f6 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h @@ -0,0 +1,288 @@ +/* + * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific device core support + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbsdpcmdev.h,v 13.29.4.1.4.6.6.2 2008/12/31 21:16:51 Exp $ + */ + +#ifndef _sbsdpcmdev_h_ +#define _sbsdpcmdev_h_ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + dma64regs_t xmt; /* dma tx */ + uint32 PAD[2]; + dma64regs_t rcv; /* dma rx */ + uint32 PAD[2]; +} dma64p_t; + +/* dma64 sdiod corerev >= 1 */ +typedef volatile struct { + dma64p_t dma64regs[2]; + dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */ + uint32 PAD[92]; +} sdiodma64_t; + +/* dma32 sdiod corerev == 0 */ +typedef volatile struct { + dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */ + uint32 PAD[108]; +} sdiodma32_t; + +/* dma32 regs for pcmcia core */ +typedef volatile struct { + dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */ + uint32 PAD[116]; +} pcmdma32_t; + +/* core registers */ +typedef volatile struct { + uint32 corecontrol; /* CoreControl, 0x000, rev8 */ + uint32 corestatus; /* CoreStatus, 0x004, rev8 */ + uint32 PAD[1]; + uint32 biststatus; /* BistStatus, 0x00c, rev8 */ + + /* PCMCIA access */ + uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */ + uint16 PAD[1]; + uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */ + uint16 PAD[1]; + uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */ + uint16 PAD[1]; + uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */ + uint16 PAD[1]; + + /* interrupt */ + uint32 intstatus; /* IntStatus, 0x020, rev8 */ + uint32 hostintmask; /* IntHostMask, 0x024, rev8 */ + uint32 intmask; /* IntSbMask, 0x028, rev8 */ + uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */ + uint32 sbintmask; /* SBIntMask, 0x030, rev8 */ + uint32 PAD[3]; + uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */ + uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */ + uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */ + uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */ + + /* synchronized access to registers in SDIO clock domain */ + uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */ + uint32 PAD[3]; + + /* PCMCIA frame control */ + uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */ + uint8 PAD[3]; + uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */ + uint8 PAD[155]; + + /* interrupt batching control */ + uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */ + uint32 PAD[3]; + + /* counters */ + uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */ + uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */ + uint32 wroutofsync; /* RdOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */ + uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */ + uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */ + uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */ + uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */ + uint32 PAD[40]; + uint32 
clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */ + uint32 PAD[7]; + + /* DMA engines */ + volatile union { + pcmdma32_t pcm32; + sdiodma32_t sdiod32; + sdiodma64_t sdiod64; + } dma; + + /* SDIO/PCMCIA CIS region */ + char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */ + + /* PCMCIA function control registers */ + char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */ + uint16 PAD[55]; + + /* PCMCIA backplane access */ + uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */ + uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */ + uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */ + uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */ + uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */ + uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */ + uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */ + uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */ + uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */ + uint16 PAD[31]; + + /* sprom "size" & "blank" info */ + uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */ + uint32 PAD[464]; + + /* Sonics SiliconBackplane registers */ + sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */ +} sdpcmd_regs_t; + +/* corecontrol */ +#define CC_CISRDY (1 << 0) /* CIS Ready */ +#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */ +#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */ + +/* corestatus */ +#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */ +#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */ +#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */ + +#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */ +#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */ +#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */ +#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */ + +/* intstatus */ +#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ +#define I_PC (1 << 10) /* descriptor error */ +#define I_PD (1 << 11) /* data error */ +#define I_DE (1 << 12) /* Descriptor protocol Error */ +#define I_RU (1 << 13) /* Receive descriptor Underflow */ +#define I_RO (1 << 14) /* Receive fifo Overflow */ +#define I_XU (1 << 15) /* Transmit fifo Underflow */ +#define I_RI (1 << 16) /* Receive Interrupt */ +#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ +#define I_XI (1 << 24) /* Transmit Interrupt */ +#define I_RF_TERM (1 << 25) /* Read Frame Terminate */ +#define I_WF_TERM (1 << 26) /* Write Frame Terminate */ +#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT (1 << 
28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */ +#define I_SRESET (1 << 30) /* CCCR RES interrupt */ +#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */ +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* sbintstatus */ +#define I_SB_SERR (1 << 8) /* Backplane SError (write) */ +#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */ +#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */ + +/* sdioaccess */ +#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */ +#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */ +#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */ +#define SDA_WRITE 0x01000000 /* Write bit */ +#define SDA_READ 0x00000000 /* Write bit cleared for Read */ +#define SDA_BUSY 0x80000000 /* Busy bit */ + +/* sdioaccess-accessible register address spaces */ +#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */ +#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */ +#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */ +#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */ + +/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */ +#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */ +#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */ +#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */ +#define SDA_DEVICECONTROL 0x009 /* DeviceControl */ +#define SDA_SBADDRLOW 0x00a /* SbAddrLow */ +#define SDA_SBADDRMID 0x00b /* SbAddrMid */ +#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */ +#define SDA_FRAMECTRL 0x00d /* FrameCtrl */ +#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */ +#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */ +#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */ +#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */ +#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */ +#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */ + +/* SDA_F2WATERMARK */ +#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */ + +/* SDA_SBADDRLOW */ +#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */ + +/* SDA_SBADDRMID */ +#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */ + +/* SDA_SBADDRHIGH */ +#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */ + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */ +#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */ + +/* pcmciaframectrl */ +#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */ + +/* intrcvlazy */ +#define IRL_TO_MASK 0x00ffffff /* timeout */ +#define IRL_FC_MASK 0xff000000 /* frame count */ +#define IRL_FC_SHIFT 24 /* frame count */ + +/* rx header */ +typedef volatile struct { + uint16 len; + uint16 flags; +} sdpcmd_rxh_t; + +/* rx header flags */ +#define RXF_CRC 0x0001 /* CRC error detected */ +#define RXF_WOOS 0x0002 /* write frame out of sync */ +#define RXF_WF_TERM 0x0004 /* write frame terminated */ +#define RXF_ABORT 0x0008 /* write frame aborted */ +#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */ + +/* HW frame tag */ +#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ + +#endif /* _sbsdpcmdev_h_ */ diff --git 
a/drivers/net/wireless/bcm4329/include/sbsocram.h b/drivers/net/wireless/bcm4329/include/sbsocram.h new file mode 100644 index 000000000000..5ede0b66d97f --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sbsocram.h @@ -0,0 +1,150 @@ +/* + * BCM47XX Sonics SiliconBackplane embedded ram core + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbsocram.h,v 13.9.162.2 2008/12/12 14:13:27 Exp $ + */ + + +#ifndef _SBSOCRAM_H +#define _SBSOCRAM_H + +#ifndef _LANGUAGE_ASSEMBLY + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + + +typedef volatile struct sbsocramregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; + uint32 errlogaddr; + + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[17]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; +} sbsocramregs_t; + +#endif + + +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + + +#define SRCI_PT_MASK 0x00070000 +#define SRCI_PT_SHIFT 16 + +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 + +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + + +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + + +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SR_BSZ_BASE 14 + + +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 +#define SRSC_SBYEN_SHIFT 24 + + +#define SRPC_PMU_STBYDIS_MASK 0x00000010 +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 
+#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + + +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + + +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + + +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/sdio.h b/drivers/net/wireless/bcm4329/include/sdio.h new file mode 100644 index 000000000000..280cb845fb04 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sdio.h @@ -0,0 +1,566 @@ +/* + * SDIO spec header file + * Protocol and standard (common) device definitions + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sdio.h,v 13.24.4.1.4.1.16.1 2009/08/12 01:08:02 Exp $ + */ + +#ifndef _SDIO_H +#define _SDIO_H + + +/* CCCR structure for function 0 */ +typedef volatile struct { + uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */ + uint8 sd_rev; /* RO, sd spec revision */ + uint8 io_en; /* I/O enable */ + uint8 io_rdy; /* I/O ready reg */ + uint8 intr_ctl; /* Master and per function interrupt enable control */ + uint8 intr_status; /* RO, interrupt pending status */ + uint8 io_abort; /* read/write abort or reset all functions */ + uint8 bus_inter; /* bus interface control */ + uint8 capability; /* RO, card capability */ + + uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */ + uint8 cis_base_mid; + uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */ + + /* suspend/resume registers */ + uint8 bus_suspend; /* 0xC */ + uint8 func_select; /* 0xD */ + uint8 exec_flag; /* 0xE */ + uint8 ready_flag; /* 0xF */ + + uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */ + + uint8 power_control; /* 0x12 (SDIO version 1.10) */ + + uint8 speed_control; /* 0x13 */ +} sdio_regs_t; + +/* SDIO Device CCCR offsets */ +#define SDIOD_CCCR_REV 0x00 +#define SDIOD_CCCR_SDREV 0x01 +#define SDIOD_CCCR_IOEN 0x02 +#define SDIOD_CCCR_IORDY 0x03 +#define SDIOD_CCCR_INTEN 0x04 +#define SDIOD_CCCR_INTPEND 0x05 +#define SDIOD_CCCR_IOABORT 0x06 +#define SDIOD_CCCR_BICTRL 0x07 +#define SDIOD_CCCR_CAPABLITIES 0x08 +#define SDIOD_CCCR_CISPTR_0 0x09 +#define SDIOD_CCCR_CISPTR_1 0x0A +#define SDIOD_CCCR_CISPTR_2 0x0B +#define SDIOD_CCCR_BUSSUSP 0x0C +#define SDIOD_CCCR_FUNCSEL 0x0D +#define SDIOD_CCCR_EXECFLAGS 0x0E +#define SDIOD_CCCR_RDYFLAGS 0x0F +#define SDIOD_CCCR_BLKSIZE_0 0x10 +#define SDIOD_CCCR_BLKSIZE_1 0x11 +#define SDIOD_CCCR_POWER_CONTROL 0x12 +#define SDIOD_CCCR_SPEED_CONTROL 0x13 + +/* Broadcom extensions (corerev >= 1) */ +#define SDIOD_CCCR_BRCM_SEPINT 0xf2 + +/* cccr_sdio_rev */ +#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */ +#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */ + +/* sd_rev */ +#define SD_REV_PHY_MASK 0x0f /* SD format version number */ + +/* io_en */ +#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */ +#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */ + +/* io_rdys */ +#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */ +#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */ + +/* intr_ctl */ +#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */ +#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */ +#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */ + +/* intr_status */ +#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */ +#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */ + +/* io_abort */ +#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */ +#define IO_ABORT_FUNC_MASK 0x07 /* abort selction: function x */ + +/* bus_inter */ +#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */ +#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */ +#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */ +#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */ +#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */ +#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */ + +/* capability */ +#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */ +#define SDIO_CAP_LSC 0x40 /* low speed card */ +#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */ 
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_SBS 0x08 /* support suspend/resume */ +#define SDIO_CAP_SRW 0x04 /* support read wait */ +#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */ +#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */ + +/* power_control */ +#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */ +#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */ + +/* speed_control (control device entry into high-speed clocking mode) */ +#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */ +#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */ + +/* brcm sepint */ +#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */ +#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */ +#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */ + +/* FBR structure for function 1-7, FBR addresses and register offsets */ +typedef volatile struct { + uint8 devctr; /* device interface, CSA control */ + uint8 ext_dev; /* extended standard I/O device type code */ + uint8 pwr_sel; /* power selection support */ + uint8 PAD[6]; /* reserved */ + + uint8 cis_low; /* CIS LSB */ + uint8 cis_mid; + uint8 cis_high; /* CIS MSB */ + uint8 csa_low; /* code storage area, LSB */ + uint8 csa_mid; + uint8 csa_high; /* code storage area, MSB */ + uint8 csa_dat_win; /* data access window to function */ + + uint8 fnx_blk_size[2]; /* block size, little endian */ +} sdio_fbr_t; + +/* Maximum number of I/O funcs */ +#define SDIOD_MAX_IOFUNCS 7 + +/* SDIO Device FBR Start Address */ +#define SDIOD_FBR_STARTADDR 0x100 + +/* SDIO Device FBR Size */ +#define SDIOD_FBR_SIZE 0x100 + +/* Macro to calculate FBR register base */ +#define SDIOD_FBR_BASE(n) ((n) * 0x100) + +/* Function register offsets */ +#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */ +#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */ +#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */ + +/* SDIO Function CIS ptr offset */ +#define SDIOD_FBR_CISPTR_0 0x09 +#define SDIOD_FBR_CISPTR_1 0x0A +#define SDIOD_FBR_CISPTR_2 0x0B + +/* Code Storage Area pointer */ +#define SDIOD_FBR_CSA_ADDR_0 0x0C +#define SDIOD_FBR_CSA_ADDR_1 0x0D +#define SDIOD_FBR_CSA_ADDR_2 0x0E +#define SDIOD_FBR_CSA_DATA 0x0F + +/* SDIO Function I/O Block Size */ +#define SDIOD_FBR_BLKSIZE_0 0x10 +#define SDIOD_FBR_BLKSIZE_1 0x11 + +/* devctr */ +#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */ +#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */ +#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */ +/* interface codes */ +#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */ +#define SDIOD_DIC_UART 1 +#define SDIOD_DIC_BLUETOOTH_A 2 +#define SDIOD_DIC_BLUETOOTH_B 3 +#define SDIOD_DIC_GPS 4 +#define SDIOD_DIC_CAMERA 5 +#define SDIOD_DIC_PHS 6 +#define SDIOD_DIC_WLAN 7 +#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */ + +/* pwr_sel */ +#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */ +#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */ + +/* misc defines */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_3 3 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or 
unrecognized */ +#define SD_CARD_TYPE_IO 1 /* IO only card */ +#define SD_CARD_TYPE_MEMORY 2 /* memory only card */ +#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */ + +#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */ +#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */ + +/* Card registers: status bit position */ +#define CARDREG_STATUS_BIT_OUTOFRANGE 31 +#define CARDREG_STATUS_BIT_COMCRCERROR 23 +#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22 +#define CARDREG_STATUS_BIT_ERROR 19 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9 +#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4 + + + +#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */ +#define SD_CMD_SEND_OPCOND 1 +#define SD_CMD_MMC_SET_RCA 3 +#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */ +#define SD_CMD_SELECT_DESELECT_CARD 7 +#define SD_CMD_SEND_CSD 9 +#define SD_CMD_SEND_CID 10 +#define SD_CMD_STOP_TRANSMISSION 12 +#define SD_CMD_SEND_STATUS 13 +#define SD_CMD_GO_INACTIVE_STATE 15 +#define SD_CMD_SET_BLOCKLEN 16 +#define SD_CMD_READ_SINGLE_BLOCK 17 +#define SD_CMD_READ_MULTIPLE_BLOCK 18 +#define SD_CMD_WRITE_BLOCK 24 +#define SD_CMD_WRITE_MULTIPLE_BLOCK 25 +#define SD_CMD_PROGRAM_CSD 27 +#define SD_CMD_SET_WRITE_PROT 28 +#define SD_CMD_CLR_WRITE_PROT 29 +#define SD_CMD_SEND_WRITE_PROT 30 +#define SD_CMD_ERASE_WR_BLK_START 32 +#define SD_CMD_ERASE_WR_BLK_END 33 +#define SD_CMD_ERASE 38 +#define SD_CMD_LOCK_UNLOCK 42 +#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */ +#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */ +#define SD_CMD_APP_CMD 55 +#define SD_CMD_GEN_CMD 56 +#define SD_CMD_READ_OCR 58 +#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */ +#define SD_ACMD_SD_STATUS 13 +#define SD_ACMD_SEND_NUM_WR_BLOCKS 22 +#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23 +#define SD_ACMD_SD_SEND_OP_COND 41 +#define SD_ACMD_SET_CLR_CARD_DETECT 42 +#define SD_ACMD_SEND_SCR 51 + +/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */ +#define SD_IO_OP_READ 0 /* Read_Write: Read */ +#define SD_IO_OP_WRITE 1 /* Read_Write: Write */ +#define SD_IO_RW_NORMAL 0 /* no RAW */ +#define SD_IO_RW_RAW 1 /* RAW */ +#define SD_IO_BYTE_MODE 0 /* Byte Mode */ +#define SD_IO_BLOCK_MODE 1 /* BlockMode */ +#define SD_IO_FIXED_ADDRESS 0 /* fix Address */ +#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */ + +/* build SD_CMD_IO_RW_DIRECT Argument */ +#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \ + (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF)) + +/* build SD_CMD_IO_RW_EXTENDED Argument */ +#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \ + (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF)) + +/* SDIO response parameters */ +#define SD_RSP_NO_NONE 0 +#define SD_RSP_NO_1 1 +#define SD_RSP_NO_2 2 +#define SD_RSP_NO_3 3 +#define SD_RSP_NO_4 4 +#define SD_RSP_NO_5 5 +#define SD_RSP_NO_6 6 + + /* Modified R6 response (to CMD3) */ +#define SD_RSP_MR6_COM_CRC_ERROR 0x8000 +#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000 +#define SD_RSP_MR6_ERROR 0x2000 + + /* Modified R1 in R4 Response (to CMD5) */ +#define SD_RSP_MR1_SBIT 0x80 +#define SD_RSP_MR1_PARAMETER_ERROR 0x40 +#define SD_RSP_MR1_RFU5 0x20 +#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10 
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08 +#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04 +#define SD_RSP_MR1_RFU1 0x02 +#define SD_RSP_MR1_IDLE_STATE 0x01 + + /* R5 response (to CMD52 and CMD53) */ +#define SD_RSP_R5_COM_CRC_ERROR 0x80 +#define SD_RSP_R5_ILLEGAL_COMMAND 0x40 +#define SD_RSP_R5_IO_CURRENTSTATE1 0x20 +#define SD_RSP_R5_IO_CURRENTSTATE0 0x10 +#define SD_RSP_R5_ERROR 0x08 +#define SD_RSP_R5_RFU 0x04 +#define SD_RSP_R5_FUNC_NUM_ERROR 0x02 +#define SD_RSP_R5_OUT_OF_RANGE 0x01 + +#define SD_RSP_R5_ERRBITS 0xCB + + +/* ------------------------------------------------ + * SDIO Commands and responses + * + * I/O only commands are: + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +/* SDIO Commands */ +#define SDIOH_CMD_0 0 +#define SDIOH_CMD_3 3 +#define SDIOH_CMD_5 5 +#define SDIOH_CMD_7 7 +#define SDIOH_CMD_15 15 +#define SDIOH_CMD_52 52 +#define SDIOH_CMD_53 53 +#define SDIOH_CMD_59 59 + +/* SDIO Command Responses */ +#define SDIOH_RSP_NONE 0 +#define SDIOH_RSP_R1 1 +#define SDIOH_RSP_R2 2 +#define SDIOH_RSP_R3 3 +#define SDIOH_RSP_R4 4 +#define SDIOH_RSP_R5 5 +#define SDIOH_RSP_R6 6 + +/* + * SDIO Response Error flags + */ +#define SDIOH_RSP5_ERROR_FLAGS 0xCB + +/* ------------------------------------------------ + * SDIO Command structures. I/O only commands are: + * + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +#define CMD5_OCR_M BITFIELD_MASK(24) +#define CMD5_OCR_S 0 + +#define CMD7_RCA_M BITFIELD_MASK(16) +#define CMD7_RCA_S 16 + +#define CMD_15_RCA_M BITFIELD_MASK(16) +#define CMD_15_RCA_S 16 + +#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52 + */ +#define CMD52_DATA_S 0 +#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD52_REG_ADDR_S 9 +#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */ +#define CMD52_RAW_S 27 +#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD52_FUNCTION_S 28 +#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD52_RW_FLAG_S 31 + + +#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */ +#define CMD53_BYTE_BLK_CNT_S 0 +#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD53_REG_ADDR_S 9 +#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */ +#define CMD53_OP_CODE_S 26 +#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */ +#define CMD53_BLK_MODE_S 27 +#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD53_FUNCTION_S 28 +#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD53_RW_FLAG_S 31 + +/* ------------------------------------------------------ + * SDIO Command Response structures for SD1 and SD4 modes + * ----------------------------------------------------- + */ +#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_IO_OCR_S 0 +#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */ +#define RSP4_STUFF_S 24 +#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */ +#define RSP4_MEM_PRESENT_S 27 +#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */ +#define RSP4_NUM_FUNCS_S 28 +#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */ +#define RSP4_CARD_READY_S 31 + +#define RSP6_STATUS_M 
BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0] + */ +#define RSP6_STATUS_S 0 +#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */ +#define RSP6_IO_RCA_S 16 + +#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */ +#define RSP1_AKE_SEQ_ERROR_S 3 +#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP1_APP_CMD_S 5 +#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */ +#define RSP1_READY_FOR_DATA_S 8 +#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card + * when Cmd was received + */ +#define RSP1_CURR_STATE_S 9 +#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */ +#define RSP1_EARSE_RESET_S 13 +#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */ +#define RSP1_CARD_ECC_DISABLE_S 14 +#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */ +#define RSP1_WP_ERASE_SKIP_S 15 +#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits + * of CSD + */ +#define RSP1_CID_CSD_OVERW_S 16 +#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */ +#define RSP1_ERROR_S 19 +#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */ +#define RSP1_CC_ERROR_S 20 +#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed + * to correct data + */ +#define RSP1_CARD_ECC_FAILED_S 21 +#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */ +#define RSP1_ILLEGAL_CMD_S 22 +#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed + */ +#define RSP1_COM_CRC_ERROR_S 23 +#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */ +#define RSP1_LOCK_UNLOCK_FAIL_S 24 +#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */ +#define RSP1_CARD_LOCKED_S 25 +#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program + * write-protected blocks + */ +#define RSP1_WP_VIOLATION_S 26 +#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */ +#define RSP1_ERASE_PARAM_S 27 +#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */ +#define RSP1_ERASE_SEQ_ERR_S 28 +#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */ +#define RSP1_BLK_LEN_ERR_S 29 +#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */ +#define RSP1_ADDR_ERR_S 30 +#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */ +#define RSP1_OUT_OF_RANGE_S 31 + + +#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */ +#define RSP5_DATA_S 0 +#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */ +#define RSP5_FLAGS_S 8 +#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */ +#define RSP5_STUFF_S 16 + +/* ---------------------------------------------- + * SDIO Command Response structures for SPI mode + * ---------------------------------------------- + */ +#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */ +#define SPIRSP4_IO_OCR_S 0 +#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */ +#define SPIRSP4_STUFF_S 16 +#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */ +#define SPIRSP4_MEM_PRESENT_S 19 +#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) 
/* Bits [22:20] - Number of I/O funcs */ +#define SPIRSP4_NUM_FUNCS_S 20 +#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */ +#define SPIRSP4_CARD_READY_S 23 +#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */ +#define SPIRSP4_IDLE_STATE_S 24 +#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP4_ILLEGAL_CMD_S 26 +#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP4_COM_CRC_ERROR_S 27 +#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP4_FUNC_NUM_ERROR_S 28 +#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP4_PARAM_ERROR_S 30 +#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP4_START_BIT_S 31 + +#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */ +#define SPIRSP5_DATA_S 16 +#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +#endif /* _SDIO_H */ diff --git a/drivers/net/wireless/bcm4329/include/sdioh.h b/drivers/net/wireless/bcm4329/include/sdioh.h new file mode 100644 index 000000000000..8123452eac2b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sdioh.h @@ -0,0 +1,299 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this 
software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sdioh.h,v 13.13.18.1.16.3 2009/12/08 22:34:21 Exp $ + */ + +#ifndef _SDIOH_H +#define _SDIOH_H + +#define SD_SysAddr 0x000 +#define SD_BlockSize 0x004 +#define SD_BlockCount 0x006 +#define SD_Arg0 0x008 +#define SD_Arg1 0x00A +#define SD_TransferMode 0x00C +#define SD_Command 0x00E +#define SD_Response0 0x010 +#define SD_Response1 0x012 +#define SD_Response2 0x014 +#define SD_Response3 0x016 +#define SD_Response4 0x018 +#define SD_Response5 0x01A +#define SD_Response6 0x01C +#define SD_Response7 0x01E +#define SD_BufferDataPort0 0x020 +#define SD_BufferDataPort1 0x022 +#define SD_PresentState 0x024 +#define SD_HostCntrl 0x028 +#define SD_PwrCntrl 0x029 +#define SD_BlockGapCntrl 0x02A +#define SD_WakeupCntrl 0x02B +#define SD_ClockCntrl 0x02C +#define SD_TimeoutCntrl 0x02E +#define SD_SoftwareReset 0x02F +#define SD_IntrStatus 0x030 +#define SD_ErrorIntrStatus 0x032 +#define SD_IntrStatusEnable 0x034 +#define SD_ErrorIntrStatusEnable 0x036 +#define SD_IntrSignalEnable 0x038 +#define SD_ErrorIntrSignalEnable 0x03A +#define SD_CMD12ErrorStatus 0x03C +#define SD_Capabilities 0x040 +#define SD_Capabilities_Reserved 0x044 +#define SD_MaxCurCap 0x048 +#define SD_MaxCurCap_Reserved 0x04C +#define SD_ADMA_SysAddr 0x58 +#define SD_SlotInterruptStatus 0x0FC +#define SD_HostControllerVersion 0x0FE + +/* SD specific registers in PCI config space */ +#define SD_SlotInfo 0x40 + +/* SD_Capabilities reg (0x040) */ +#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6) +#define CAP_TO_CLKFREQ_S 0 +#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1) +#define CAP_TO_CLKUNIT_S 7 +#define CAP_BASECLK_M BITFIELD_MASK(6) +#define CAP_BASECLK_S 8 +#define CAP_MAXBLOCK_M BITFIELD_MASK(2) +#define CAP_MAXBLOCK_S 16 +#define CAP_ADMA2_M BITFIELD_MASK(1) +#define CAP_ADMA2_S 19 +#define CAP_ADMA1_M BITFIELD_MASK(1) +#define CAP_ADMA1_S 20 +#define CAP_HIGHSPEED_M BITFIELD_MASK(1) +#define CAP_HIGHSPEED_S 21 +#define CAP_DMA_M BITFIELD_MASK(1) +#define CAP_DMA_S 22 +#define CAP_SUSPEND_M BITFIELD_MASK(1) +#define CAP_SUSPEND_S 23 +#define CAP_VOLT_3_3_M BITFIELD_MASK(1) +#define CAP_VOLT_3_3_S 24 +#define CAP_VOLT_3_0_M BITFIELD_MASK(1) +#define CAP_VOLT_3_0_S 25 +#define CAP_VOLT_1_8_M BITFIELD_MASK(1) +#define CAP_VOLT_1_8_S 26 +#define CAP_64BIT_HOST_M BITFIELD_MASK(1) +#define CAP_64BIT_HOST_S 28 + +/* SD_MaxCurCap reg (0x048) */ +#define CAP_CURR_3_3_M BITFIELD_MASK(8) +#define CAP_CURR_3_3_S 0 +#define CAP_CURR_3_0_M BITFIELD_MASK(8) +#define CAP_CURR_3_0_S 8 +#define CAP_CURR_1_8_M BITFIELD_MASK(8) +#define CAP_CURR_1_8_S 16 + +/* SD_SysAddr: Offset 0x0000, Size 4 bytes */ + +/* SD_BlockSize: Offset 0x004, Size 2 bytes */ +#define BLKSZ_BLKSZ_M BITFIELD_MASK(12) +#define BLKSZ_BLKSZ_S 0 +#define BLKSZ_BNDRY_M BITFIELD_MASK(3) +#define BLKSZ_BNDRY_S 12 + +/* SD_BlockCount: Offset 0x006, size 2 bytes */ + +/* SD_Arg0: Offset 0x008, size = 4 bytes */ 
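The register offsets and capability bitfields above all follow the driver-wide *_M/*_S convention: the _M macro is a right-aligned mask of the field's width (built with BITFIELD_MASK) and the _S macro is the field's bit offset within the register. The following standalone C program is an illustrative sketch only, not part of the patch: it shows how a host driver would typically pull fields out of an SD_Capabilities readback. The GFIELD() helper and the local copies of BITFIELD_MASK and the two capability fields are assumptions made so the sketch compiles on its own; the driver keeps its own equivalents in other headers of this patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Assumed helpers, mirroring the *_M/*_S mask/shift convention used throughout these headers. */
#define BITFIELD_MASK(width)	((1u << (width)) - 1)
#define GFIELD(val, field)	(((val) >> field##_S) & field##_M)

/* Repeated from the SD_Capabilities (0x040) definitions above so this sketch is self-contained. */
#define CAP_BASECLK_M	BITFIELD_MASK(6)
#define CAP_BASECLK_S	8
#define CAP_MAXBLOCK_M	BITFIELD_MASK(2)
#define CAP_MAXBLOCK_S	16

int main(void)
{
	uint32_t caps = 0x01e03230;	/* hypothetical SD_Capabilities readback value */

	unsigned baseclk  = GFIELD(caps, CAP_BASECLK);	/* bits [13:8]: base clock frequency, MHz */
	unsigned maxblock = GFIELD(caps, CAP_MAXBLOCK);	/* bits [17:16]: max block length code */

	printf("base clock %u MHz, max block length code %u\n", baseclk, maxblock);
	return 0;
}

The same pattern applies to every *_M/*_S pair in these headers (CMD52/CMD53 argument fields, response decoding, interrupt status bits): shift right by the _S value, then mask with the _M value.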
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? 
*/ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* misc defines */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define 
ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 + +/* Masks to select CMD vs. DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +#endif /* _SDIOH_H */ diff --git a/drivers/net/wireless/bcm4329/include/sdiovar.h b/drivers/net/wireless/bcm4329/include/sdiovar.h new file mode 100644 index 000000000000..0179d4cb96db --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/sdiovar.h @@ -0,0 +1,58 @@ +/* + * Structure used by apps whose drivers access SDIO drivers. + * Pulled out separately so dhdu and wlu can both use it. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sdiovar.h,v 13.5.14.2.16.2 2009/12/08 22:34:21 Exp $ + */ + +#ifndef _sdiovar_h_ +#define _sdiovar_h_ + +#include <typedefs.h> + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include <packed_section_start.h> + +typedef struct sdreg { + int func; + int offset; + int value; +} sdreg_t; + +/* Common msglevel constants */ +#define SDH_ERROR_VAL 0x0001 /* Error */ +#define SDH_TRACE_VAL 0x0002 /* Trace */ +#define SDH_INFO_VAL 0x0004 /* Info */ +#define SDH_DEBUG_VAL 0x0008 /* Debug */ +#define SDH_DATA_VAL 0x0010 /* Data */ +#define SDH_CTRL_VAL 0x0020 /* Control Regs */ +#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ +#define SDH_DMA_VAL 0x0080 /* DMA */ + +#define NUM_PREV_TRANSACTIONS 16 + + +#include <packed_section_end.h> + +#endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/bcm4329/include/siutils.h b/drivers/net/wireless/bcm4329/include/siutils.h new file mode 100644 index 000000000000..cb9f1407b73b --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/siutils.h @@ -0,0 +1,235 @@ +/* + * Misc utility routines for accessing the SOC Interconnects + * of Broadcom HNBU chips. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: siutils.h,v 13.197.4.2.4.3.8.16 2010/06/23 21:36:05 Exp $ + */ + + +#ifndef _siutils_h_ +#define _siutils_h_ + + +struct si_pub { + uint socitype; + + uint bustype; + uint buscoretype; + uint buscorerev; + uint buscoreidx; + int ccrev; + uint32 cccaps; + int pmurev; + uint32 pmucaps; + uint boardtype; + uint boardvendor; + uint boardflags; + uint chip; + uint chiprev; + uint chippkg; + uint32 chipst; + bool issim; + uint socirev; + bool pci_pr32414; +}; + +#if defined(WLC_HIGH) && !defined(WLC_LOW) +typedef struct si_pub si_t; +#else +typedef const struct si_pub si_t; +#endif + + +#define SI_OSH NULL + + +#define XTAL 0x1 +#define PLL 0x2 + + +#define CLK_FAST 0 +#define CLK_DYNAMIC 2 + + +#define GPIO_DRV_PRIORITY 0 +#define GPIO_APP_PRIORITY 1 +#define GPIO_HI_PRIORITY 2 + + +#define GPIO_PULLUP 0 +#define GPIO_PULLDN 1 + + +#define GPIO_REGEVT 0 +#define GPIO_REGEVT_INTMSK 1 +#define GPIO_REGEVT_INTPOL 2 + + +#define SI_DEVPATH_BUFSZ 16 + + +#define SI_DOATTACH 1 +#define SI_PCIDOWN 2 +#define SI_PCIUP 3 + +#define ISSIM_ENAB(sih) 0 + + +#if defined(BCMPMUCTL) +#define PMUCTL_ENAB(sih) (BCMPMUCTL) +#else +#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) +#endif + + +#if defined(BCMPMUCTL) && BCMPMUCTL +#define CCCTL_ENAB(sih) (0) +#define CCPLL_ENAB(sih) (0) +#else +#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) +#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) +#endif + +typedef void (*gpio_handler_t)(uint32 stat, void *arg); + + + +extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *si_kattach(osl_t *osh); +extern void si_detach(si_t *sih); +extern bool si_pci_war16165(si_t *sih); + +extern uint si_corelist(si_t *sih, uint coreid[]); +extern uint si_coreid(si_t *sih); +extern uint si_flag(si_t *sih); +extern uint si_intflag(si_t *sih); +extern uint si_coreidx(si_t *sih); +extern uint si_coreunit(si_t *sih); +extern uint si_corevendor(si_t *sih); +extern uint si_corerev(si_t *sih); +extern void *si_osh(si_t *sih); +extern void si_setosh(si_t *sih, osl_t *osh); +extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void *si_coreregs(si_t *sih); +extern void si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val); +extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern bool si_iscoreup(si_t *sih); +extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); +extern void *si_setcoreidx(si_t *sih, uint coreidx); +extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); +extern int si_numaddrspaces(si_t *sih); +extern uint32 si_addrspace(si_t *sih, uint asidx); +extern uint32 si_addrspacesize(si_t *sih, uint asidx); +extern int si_corebist(si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_tofixup(si_t *sih); +extern void si_core_disable(si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern uint32 si_clock(si_t *sih); +extern void si_clock_pmu_spuravoid(si_t *sih, bool spuravoid); +extern uint32 si_alp_clock(si_t *sih); +extern uint32 si_ilp_clock(si_t *sih); +extern void si_pci_setup(si_t *sih, uint coremask); +extern void 
si_pcmcia_init(si_t *sih); +extern void si_setint(si_t *sih, int siflag); +extern bool si_backplane64(si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); +extern bool si_backplane64(si_t *sih); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(si_t *sih); +extern uint32 si_socram_size(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); + + +extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg); +extern void si_gpio_handler_unregister(si_t *sih, void* gpioh); +extern void si_gpio_handler_process(si_t *sih); + + +extern bool si_pci_pmecap(si_t *sih); +struct osl_info; +extern bool si_pci_fastpmecap(struct osl_info *osh); +extern bool si_pci_pmeclr(si_t *sih); +extern void si_pci_pmeen(si_t *sih); +extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); + +extern void si_sdio_init(si_t *sih); + +extern uint16 si_d11_devid(si_t *sih); +extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, + uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); + +#define si_eci_init(sih) (0) +#define si_eci_notify_bt(sih, type, val, interrupt) (0) + + + +extern int si_devpath(si_t *sih, char *path, int size); + +extern char *si_getdevpathvar(si_t *sih, const char *name); +extern int si_getdevpathintvar(si_t *sih, const char *name); + + +extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val); +extern void si_war42780_clkreq(si_t *sih, bool clkreq); +extern void si_pci_sleep(si_t *sih); +extern void si_pci_down(si_t *sih); +extern void si_pci_up(si_t *sih); +extern void si_pcie_war_ovr_disable(si_t *sih); +extern void si_pcie_extendL1timer(si_t *sih, bool extend); +extern int si_pci_fixcfg(si_t *sih); + + + + + + + +#endif diff --git a/drivers/net/wireless/bcm4329/include/spid.h b/drivers/net/wireless/bcm4329/include/spid.h new file mode 100644 index 000000000000..c740296de9af --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/spid.h @@ -0,0 +1,153 @@ +/* + * SPI device spec header file + * + * Copyright (C) 2010, Broadcom Corporation + * All Rights Reserved. 
+ * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. + * + * $Id: spid.h,v 1.7.10.1.16.3 2009/04/09 19:23:14 Exp $ + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. + * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error 
check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ +#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +#define WAIT_F2RXFIFORDY 100 +#define WAIT_F2RXFIFORDY_DELAY 20 + +#endif /* _SPI_H */ diff --git a/drivers/net/wireless/bcm4329/include/trxhdr.h b/drivers/net/wireless/bcm4329/include/trxhdr.h new file mode 100644 index 000000000000..8f5eed9410eb --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/trxhdr.h @@ -0,0 +1,46 @@ +/* + * TRX image file header format. 
+ * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: trxhdr.h,v 13.11.310.1 2008/08/17 12:58:58 Exp $ + */ + +#include <typedefs.h> + +#define TRX_MAGIC 0x30524448 /* "HDR0" */ +#define TRX_VERSION 1 /* Version 1 */ +#define TRX_MAX_LEN 0x3A0000 /* Max length */ +#define TRX_NO_HEADER 1 /* Do not write TRX header */ +#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */ +#define TRX_MAX_OFFSET 3 /* Max number of individual files */ +#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */ + +struct trx_header { + uint32 magic; /* "HDR0" */ + uint32 len; /* Length of file including header */ + uint32 crc32; /* 32-bit CRC from flag_version to end of file */ + uint32 flag_version; /* 0:15 flags, 16:31 version */ + uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */ +}; + +/* Compatibility */ +typedef struct trx_header TRXHDR, *PTRXHDR; diff --git a/drivers/net/wireless/bcm4329/include/typedefs.h b/drivers/net/wireless/bcm4329/include/typedefs.h new file mode 100644 index 000000000000..4d9dd761ed64 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/typedefs.h @@ -0,0 +1,303 @@ +/* + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * $Id: typedefs.h,v 1.85.34.1.2.5 2009/01/27 04:09:40 Exp $ + */ + + +#ifndef _TYPEDEFS_H_ +#define _TYPEDEFS_H_ + +#ifdef SITE_TYPEDEFS + + + +#include "site_typedefs.h" + +#else + + + +#ifdef __cplusplus + +#define TYPEDEF_BOOL +#ifndef FALSE +#define FALSE false +#endif +#ifndef TRUE +#define TRUE true +#endif + +#else + + +#endif + +#if defined(__x86_64__) +#define TYPEDEF_UINTPTR +typedef unsigned long long int uintptr; +#endif + + + + +#if defined(TARGETOS_nucleus) + +#include <stddef.h> + + +#define TYPEDEF_FLOAT_T +#endif + +#if defined(_NEED_SIZE_T_) +typedef long unsigned int size_t; +#endif + +#ifdef __DJGPP__ +typedef long unsigned int size_t; +#endif + + + + + +#define TYPEDEF_UINT +#ifndef TARGETENV_android +#define TYPEDEF_USHORT +#define TYPEDEF_ULONG +#endif +#ifdef __KERNEL__ +#include <linux/version.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) +#define TYPEDEF_BOOL +#endif +#endif + + + + + +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#define TYPEDEF_INT64 +#define TYPEDEF_UINT64 +#endif + + +#if defined(__ICL) + +#define TYPEDEF_INT64 + +#if defined(__STDC__) +#define TYPEDEF_UINT64 +#endif + +#endif + +#if !defined(__DJGPP__) && !defined(TARGETOS_nucleus) + + +#if defined(__KERNEL__) + +#include <linux/types.h> + +#else + + +#include <sys/types.h> + +#endif + +#endif + + + + +#define USE_TYPEDEF_DEFAULTS + +#endif + + + + +#ifdef USE_TYPEDEF_DEFAULTS +#undef USE_TYPEDEF_DEFAULTS + +#ifndef TYPEDEF_BOOL +typedef unsigned char bool; +#endif + + + +#ifndef TYPEDEF_UCHAR +typedef unsigned char uchar; +#endif + +#ifndef TYPEDEF_USHORT +typedef unsigned short ushort; +#endif + +#ifndef TYPEDEF_UINT +typedef unsigned int uint; +#endif + +#ifndef TYPEDEF_ULONG +typedef unsigned long ulong; +#endif + + + +#ifndef TYPEDEF_UINT8 +typedef unsigned char uint8; +#endif + +#ifndef TYPEDEF_UINT16 +typedef unsigned short uint16; +#endif + +#ifndef TYPEDEF_UINT32 +typedef unsigned int uint32; +#endif + +#ifndef TYPEDEF_UINT64 +typedef unsigned long long uint64; +#endif + +#ifndef TYPEDEF_UINTPTR +typedef unsigned int uintptr; +#endif + +#ifndef TYPEDEF_INT8 +typedef signed char int8; +#endif + +#ifndef TYPEDEF_INT16 +typedef signed short int16; +#endif + +#ifndef TYPEDEF_INT32 +typedef signed int int32; +#endif + +#ifndef TYPEDEF_INT64 +typedef signed long long int64; +#endif + + + +#ifndef TYPEDEF_FLOAT32 +typedef float float32; +#endif + +#ifndef TYPEDEF_FLOAT64 +typedef double float64; +#endif + + + +#ifndef TYPEDEF_FLOAT_T + +#if defined(FLOAT32) +typedef float32 float_t; +#else +typedef float64 float_t; +#endif + +#endif + + + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef OFF +#define OFF 0 +#endif + +#ifndef ON +#define ON 1 +#endif + +#define AUTO (-1) + + + +#ifndef PTRSZ +#define PTRSZ sizeof(char*) +#endif + + + +#if defined(__GNUC__) + #define BWL_COMPILER_GNU +#elif defined(__CC_ARM) + #define BWL_COMPILER_ARMCC +#else + #error "Unknown compiler!" 
+#endif + + +#ifndef INLINE + #if defined(BWL_COMPILER_MICROSOFT) + #define INLINE __inline + #elif defined(BWL_COMPILER_GNU) + #define INLINE __inline__ + #elif defined(BWL_COMPILER_ARMCC) + #define INLINE __inline + #else + #define INLINE + #endif +#endif + +#undef TYPEDEF_BOOL +#undef TYPEDEF_UCHAR +#undef TYPEDEF_USHORT +#undef TYPEDEF_UINT +#undef TYPEDEF_ULONG +#undef TYPEDEF_UINT8 +#undef TYPEDEF_UINT16 +#undef TYPEDEF_UINT32 +#undef TYPEDEF_UINT64 +#undef TYPEDEF_UINTPTR +#undef TYPEDEF_INT8 +#undef TYPEDEF_INT16 +#undef TYPEDEF_INT32 +#undef TYPEDEF_INT64 +#undef TYPEDEF_FLOAT32 +#undef TYPEDEF_FLOAT64 +#undef TYPEDEF_FLOAT_T + +#endif + + +#define UNUSED_PARAMETER(x) (void)(x) + + +#include <bcmdefs.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/include/wlioctl.h b/drivers/net/wireless/bcm4329/include/wlioctl.h new file mode 100644 index 000000000000..345ba34b94c0 --- /dev/null +++ b/drivers/net/wireless/bcm4329/include/wlioctl.h @@ -0,0 +1,1663 @@ +/* + * Custom OID/ioctl definitions for + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wlioctl.h,v 1.601.4.15.2.14.2.62 2010/08/19 01:20:12 Exp $ + */ + + +#ifndef _wlioctl_h_ +#define _wlioctl_h_ + +#include <typedefs.h> +#include <proto/ethernet.h> +#include <proto/bcmeth.h> +#include <proto/bcmevent.h> +#include <proto/802.11.h> +#include <bcmwifi.h> + + + +#define ACTION_FRAME_SIZE 1040 + +typedef struct wl_action_frame { + struct ether_addr da; + uint16 len; + uint32 packetId; + uint8 data[ACTION_FRAME_SIZE]; +} wl_action_frame_t; + +#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) + + +#define BWL_DEFAULT_PACKING +#include <packed_section_start.h> + +#define RWL_ACTION_WIFI_CATEGORY 127 +#define RWL_WIFI_OUI_BYTE1 0x90 +#define RWL_WIFI_OUI_BYTE2 0x4C +#define RWL_WIFI_OUI_BYTE3 0x0F +#define RWL_WIFI_ACTION_FRAME_SIZE sizeof(struct dot11_action_wifi_vendor_specific) +#define RWL_WIFI_DEFAULT 0x00 +#define RWL_WIFI_FIND_MY_PEER 0x09 +#define RWL_WIFI_FOUND_PEER 0x0A +#define RWL_ACTION_WIFI_FRAG_TYPE 0x55 + +typedef struct ssid_info +{ + uint8 ssid_len; + uint8 ssid[32]; +} ssid_info_t; + +typedef struct cnt_rx +{ + uint32 cnt_rxundec; + uint32 cnt_rxframe; +} cnt_rx_t; + + + +#define RWL_REF_MAC_ADDRESS_OFFSET 17 +#define RWL_DUT_MAC_ADDRESS_OFFSET 23 +#define RWL_WIFI_CLIENT_CHANNEL_OFFSET 50 +#define RWL_WIFI_SERVER_CHANNEL_OFFSET 51 + + + + + +#define WL_BSS_INFO_VERSION 108 + + +typedef struct wl_bss_info { + uint32 version; + uint32 length; + struct ether_addr BSSID; + uint16 beacon_period; + uint16 capability; + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; + uint8 rates[16]; + } rateset; + chanspec_t chanspec; + uint16 atim_window; + uint8 dtim_period; + int16 RSSI; + int8 phy_noise; + + uint8 n_cap; + uint32 nbss_cap; + uint8 ctl_ch; + uint32 reserved32[1]; + uint8 flags; + uint8 reserved[3]; + uint8 basic_mcs[MCSSET_LEN]; + + uint16 ie_offset; + uint32 ie_length; + + +} wl_bss_info_t; + +typedef struct wlc_ssid { + uint32 SSID_len; + uchar SSID[32]; +} wlc_ssid_t; + + +#define WL_BSSTYPE_INFRA 1 +#define WL_BSSTYPE_INDEP 0 +#define WL_BSSTYPE_ANY 2 + + +#define WL_SCANFLAGS_PASSIVE 0x01 +#define WL_SCANFLAGS_PROHIBITED 0x04 + +typedef struct wl_scan_params { + wlc_ssid_t ssid; + struct ether_addr bssid; + int8 bss_type; + int8 scan_type; + int32 nprobes; + int32 active_time; + int32 passive_time; + int32 home_time; + int32 channel_num; + uint16 channel_list[1]; +} wl_scan_params_t; + +#define WL_SCAN_PARAMS_FIXED_SIZE 64 + + +#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff +#define WL_SCAN_PARAMS_NSSID_SHIFT 16 + +#define WL_SCAN_ACTION_START 1 +#define WL_SCAN_ACTION_CONTINUE 2 +#define WL_SCAN_ACTION_ABORT 3 + +#define ISCAN_REQ_VERSION 1 + + +typedef struct wl_iscan_params { + uint32 version; + uint16 action; + uint16 scan_duration; + wl_scan_params_t params; +} wl_iscan_params_t; + +#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct wl_scan_results { + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; +} wl_scan_results_t; + +#define WL_SCAN_RESULTS_FIXED_SIZE 12 + + +#define WL_SCAN_RESULTS_SUCCESS 0 +#define WL_SCAN_RESULTS_PARTIAL 1 +#define WL_SCAN_RESULTS_PENDING 2 +#define WL_SCAN_RESULTS_ABORTED 3 +#define WL_SCAN_RESULTS_NO_MEM 4 + +#define ESCAN_REQ_VERSION 1 + +typedef struct wl_escan_params { + uint32 version; + uint16 action; + uint16 sync_id; + wl_scan_params_t params; +} wl_escan_params_t; + +#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct 
wl_escan_result { + uint32 buflen; + uint32 version; + uint16 sync_id; + uint16 bss_count; + wl_bss_info_t bss_info[1]; +} wl_escan_result_t; + +#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) + + +typedef struct wl_iscan_results { + uint32 status; + wl_scan_results_t results; +} wl_iscan_results_t; + +#define WL_ISCAN_RESULTS_FIXED_SIZE \ + (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) + +#define WL_NUMRATES 16 +typedef struct wl_rateset { + uint32 count; + uint8 rates[WL_NUMRATES]; +} wl_rateset_t; + + +typedef struct wl_uint32_list { + + uint32 count; + + uint32 element[1]; +} wl_uint32_list_t; + + +typedef struct wl_assoc_params { + struct ether_addr bssid; + uint16 bssid_cnt; + int32 chanspec_num; + chanspec_t chanspec_list[1]; +} wl_assoc_params_t; +#define WL_ASSOC_PARAMS_FIXED_SIZE (sizeof(wl_assoc_params_t) - sizeof(chanspec_t)) + + +typedef wl_assoc_params_t wl_reassoc_params_t; +#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + + +typedef struct wl_join_params { + wlc_ssid_t ssid; + wl_assoc_params_t params; +} wl_join_params_t; +#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t)) + +#define WLC_CNTRY_BUF_SZ 4 + + +typedef enum sup_auth_status { + + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + WLC_SUP_LAST_BASIC_STATE, + + + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + + WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, + + WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, + + WLC_SUP_KEYXCHANGE_PREP_M4, + WLC_SUP_KEYXCHANGE_WAIT_G1, + WLC_SUP_KEYXCHANGE_PREP_G2 +} sup_auth_status_t; + + +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_OCB_MSDU 5 +#define CRYPTO_ALGO_AES_OCB_MPDU 6 +#define CRYPTO_ALGO_NALG 7 +#define CRYPTO_ALGO_SMS4 11 + +#define WSEC_GEN_MIC_ERROR 0x0001 +#define WSEC_GEN_REPLAY 0x0002 +#define WSEC_GEN_ICV_ERROR 0x0004 + +#define WL_SOFT_KEY (1 << 0) +#define WL_PRIMARY_KEY (1 << 1) +#define WL_KF_RES_4 (1 << 4) +#define WL_KF_RES_5 (1 << 5) +#define WL_IBSS_PEER_GROUP_KEY (1 << 6) + +typedef struct wl_wsec_key { + uint32 index; + uint32 len; + uint8 data[DOT11_MAX_KEY_SIZE]; + uint32 pad_1[18]; + uint32 algo; + uint32 flags; + uint32 pad_2[2]; + int pad_3; + int iv_initialized; + int pad_4; + + struct { + uint32 hi; + uint16 lo; + } rxiv; + uint32 pad_5[2]; + struct ether_addr ea; +} wl_wsec_key_t; + +#define WSEC_MIN_PSK_LEN 8 +#define WSEC_MAX_PSK_LEN 64 + + +#define WSEC_PASSPHRASE (1<<0) + + +typedef struct { + ushort key_len; + ushort flags; + uint8 key[WSEC_MAX_PSK_LEN]; +} wsec_pmk_t; + + +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +#define SES_OW_ENABLED 0x0040 +#define SMS4_ENABLED 0x0100 + + +#define WPA_AUTH_DISABLED 0x0000 +#define WPA_AUTH_NONE 0x0001 +#define WPA_AUTH_UNSPECIFIED 0x0002 +#define WPA_AUTH_PSK 0x0004 + +#define WPA2_AUTH_UNSPECIFIED 0x0040 +#define WPA2_AUTH_PSK 0x0080 +#define BRCM_AUTH_PSK 0x0100 +#define BRCM_AUTH_DPT 0x0200 +#define WPA_AUTH_WAPI 0x0400 + +#define WPA_AUTH_PFN_ANY 0xffffffff + + +#define MAXPMKID 16 + +typedef struct _pmkid { + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; +} pmkid_t; + +typedef struct _pmkid_list { + uint32 npmkid; + pmkid_t pmkid[1]; +} 
pmkid_list_t; + +typedef struct _pmkid_cand { + struct ether_addr BSSID; + uint8 preauth; +} pmkid_cand_t; + +typedef struct _pmkid_cand_list { + uint32 npmkid_cand; + pmkid_cand_t pmkid_cand[1]; +} pmkid_cand_list_t; + + + + +typedef struct { + uint32 val; + struct ether_addr ea; +} scb_val_t; + + + +typedef struct channel_info { + int hw_channel; + int target_channel; + int scan_channel; +} channel_info_t; + + +struct maclist { + uint count; + struct ether_addr ea[1]; +}; + + +typedef struct get_pktcnt { + uint rx_good_pkt; + uint rx_bad_pkt; + uint tx_good_pkt; + uint tx_bad_pkt; + uint rx_ocast_good_pkt; +} get_pktcnt_t; + + +typedef struct wl_ioctl { + uint cmd; + void *buf; + uint len; + uint8 set; + uint used; + uint needed; +} wl_ioctl_t; + + + +#define WLC_IOCTL_MAGIC 0x14e46c77 + + +#define WLC_IOCTL_VERSION 1 + +#define WLC_IOCTL_MAXLEN 8192 +#define WLC_IOCTL_SMLEN 256 +#define WLC_IOCTL_MEDLEN 1536 + + + +#define WLC_GET_MAGIC 0 +#define WLC_GET_VERSION 1 +#define WLC_UP 2 +#define WLC_DOWN 3 +#define WLC_GET_LOOP 4 +#define WLC_SET_LOOP 5 +#define WLC_DUMP 6 +#define WLC_GET_MSGLEVEL 7 +#define WLC_SET_MSGLEVEL 8 +#define WLC_GET_PROMISC 9 +#define WLC_SET_PROMISC 10 + +#define WLC_GET_RATE 12 + +#define WLC_GET_INSTANCE 14 + + + + +#define WLC_GET_INFRA 19 +#define WLC_SET_INFRA 20 +#define WLC_GET_AUTH 21 +#define WLC_SET_AUTH 22 +#define WLC_GET_BSSID 23 +#define WLC_SET_BSSID 24 +#define WLC_GET_SSID 25 +#define WLC_SET_SSID 26 +#define WLC_RESTART 27 + +#define WLC_GET_CHANNEL 29 +#define WLC_SET_CHANNEL 30 +#define WLC_GET_SRL 31 +#define WLC_SET_SRL 32 +#define WLC_GET_LRL 33 +#define WLC_SET_LRL 34 +#define WLC_GET_PLCPHDR 35 +#define WLC_SET_PLCPHDR 36 +#define WLC_GET_RADIO 37 +#define WLC_SET_RADIO 38 +#define WLC_GET_PHYTYPE 39 +#define WLC_DUMP_RATE 40 +#define WLC_SET_RATE_PARAMS 41 + + +#define WLC_GET_KEY 44 +#define WLC_SET_KEY 45 +#define WLC_GET_REGULATORY 46 +#define WLC_SET_REGULATORY 47 +#define WLC_GET_PASSIVE_SCAN 48 +#define WLC_SET_PASSIVE_SCAN 49 +#define WLC_SCAN 50 +#define WLC_SCAN_RESULTS 51 +#define WLC_DISASSOC 52 +#define WLC_REASSOC 53 +#define WLC_GET_ROAM_TRIGGER 54 +#define WLC_SET_ROAM_TRIGGER 55 +#define WLC_GET_ROAM_DELTA 56 +#define WLC_SET_ROAM_DELTA 57 +#define WLC_GET_ROAM_SCAN_PERIOD 58 +#define WLC_SET_ROAM_SCAN_PERIOD 59 +#define WLC_EVM 60 +#define WLC_GET_TXANT 61 +#define WLC_SET_TXANT 62 +#define WLC_GET_ANTDIV 63 +#define WLC_SET_ANTDIV 64 + + +#define WLC_GET_CLOSED 67 +#define WLC_SET_CLOSED 68 +#define WLC_GET_MACLIST 69 +#define WLC_SET_MACLIST 70 +#define WLC_GET_RATESET 71 +#define WLC_SET_RATESET 72 + +#define WLC_LONGTRAIN 74 +#define WLC_GET_BCNPRD 75 +#define WLC_SET_BCNPRD 76 +#define WLC_GET_DTIMPRD 77 +#define WLC_SET_DTIMPRD 78 +#define WLC_GET_SROM 79 +#define WLC_SET_SROM 80 +#define WLC_GET_WEP_RESTRICT 81 +#define WLC_SET_WEP_RESTRICT 82 +#define WLC_GET_COUNTRY 83 +#define WLC_SET_COUNTRY 84 +#define WLC_GET_PM 85 +#define WLC_SET_PM 86 +#define WLC_GET_WAKE 87 +#define WLC_SET_WAKE 88 + +#define WLC_GET_FORCELINK 90 +#define WLC_SET_FORCELINK 91 +#define WLC_FREQ_ACCURACY 92 +#define WLC_CARRIER_SUPPRESS 93 +#define WLC_GET_PHYREG 94 +#define WLC_SET_PHYREG 95 +#define WLC_GET_RADIOREG 96 +#define WLC_SET_RADIOREG 97 +#define WLC_GET_REVINFO 98 +#define WLC_GET_UCANTDIV 99 +#define WLC_SET_UCANTDIV 100 +#define WLC_R_REG 101 +#define WLC_W_REG 102 + + +#define WLC_GET_MACMODE 105 +#define WLC_SET_MACMODE 106 +#define WLC_GET_MONITOR 107 +#define WLC_SET_MONITOR 108 +#define WLC_GET_GMODE 109 +#define 
WLC_SET_GMODE 110 +#define WLC_GET_LEGACY_ERP 111 +#define WLC_SET_LEGACY_ERP 112 +#define WLC_GET_RX_ANT 113 +#define WLC_GET_CURR_RATESET 114 +#define WLC_GET_SCANSUPPRESS 115 +#define WLC_SET_SCANSUPPRESS 116 +#define WLC_GET_AP 117 +#define WLC_SET_AP 118 +#define WLC_GET_EAP_RESTRICT 119 +#define WLC_SET_EAP_RESTRICT 120 +#define WLC_SCB_AUTHORIZE 121 +#define WLC_SCB_DEAUTHORIZE 122 +#define WLC_GET_WDSLIST 123 +#define WLC_SET_WDSLIST 124 +#define WLC_GET_ATIM 125 +#define WLC_SET_ATIM 126 +#define WLC_GET_RSSI 127 +#define WLC_GET_PHYANTDIV 128 +#define WLC_SET_PHYANTDIV 129 +#define WLC_AP_RX_ONLY 130 +#define WLC_GET_TX_PATH_PWR 131 +#define WLC_SET_TX_PATH_PWR 132 +#define WLC_GET_WSEC 133 +#define WLC_SET_WSEC 134 +#define WLC_GET_PHY_NOISE 135 +#define WLC_GET_BSS_INFO 136 +#define WLC_GET_PKTCNTS 137 +#define WLC_GET_LAZYWDS 138 +#define WLC_SET_LAZYWDS 139 +#define WLC_GET_BANDLIST 140 +#define WLC_GET_BAND 141 +#define WLC_SET_BAND 142 +#define WLC_SCB_DEAUTHENTICATE 143 +#define WLC_GET_SHORTSLOT 144 +#define WLC_GET_SHORTSLOT_OVERRIDE 145 +#define WLC_SET_SHORTSLOT_OVERRIDE 146 +#define WLC_GET_SHORTSLOT_RESTRICT 147 +#define WLC_SET_SHORTSLOT_RESTRICT 148 +#define WLC_GET_GMODE_PROTECTION 149 +#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 +#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 +#define WLC_UPGRADE 152 + + +#define WLC_GET_IGNORE_BCNS 155 +#define WLC_SET_IGNORE_BCNS 156 +#define WLC_GET_SCB_TIMEOUT 157 +#define WLC_SET_SCB_TIMEOUT 158 +#define WLC_GET_ASSOCLIST 159 +#define WLC_GET_CLK 160 +#define WLC_SET_CLK 161 +#define WLC_GET_UP 162 +#define WLC_OUT 163 +#define WLC_GET_WPA_AUTH 164 +#define WLC_SET_WPA_AUTH 165 +#define WLC_GET_UCFLAGS 166 +#define WLC_SET_UCFLAGS 167 +#define WLC_GET_PWRIDX 168 +#define WLC_SET_PWRIDX 169 +#define WLC_GET_TSSI 170 +#define WLC_GET_SUP_RATESET_OVERRIDE 171 +#define WLC_SET_SUP_RATESET_OVERRIDE 172 + + + + + +#define WLC_GET_PROTECTION_CONTROL 178 +#define WLC_SET_PROTECTION_CONTROL 179 +#define WLC_GET_PHYLIST 180 +#define WLC_ENCRYPT_STRENGTH 181 +#define WLC_DECRYPT_STATUS 182 +#define WLC_GET_KEY_SEQ 183 +#define WLC_GET_SCAN_CHANNEL_TIME 184 +#define WLC_SET_SCAN_CHANNEL_TIME 185 +#define WLC_GET_SCAN_UNASSOC_TIME 186 +#define WLC_SET_SCAN_UNASSOC_TIME 187 +#define WLC_GET_SCAN_HOME_TIME 188 +#define WLC_SET_SCAN_HOME_TIME 189 +#define WLC_GET_SCAN_NPROBES 190 +#define WLC_SET_SCAN_NPROBES 191 +#define WLC_GET_PRB_RESP_TIMEOUT 192 +#define WLC_SET_PRB_RESP_TIMEOUT 193 +#define WLC_GET_ATTEN 194 +#define WLC_SET_ATTEN 195 +#define WLC_GET_SHMEM 196 +#define WLC_SET_SHMEM 197 + + +#define WLC_SET_WSEC_TEST 200 +#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define WLC_TKIP_COUNTERMEASURES 202 +#define WLC_GET_PIOMODE 203 +#define WLC_SET_PIOMODE 204 +#define WLC_SET_ASSOC_PREFER 205 +#define WLC_GET_ASSOC_PREFER 206 +#define WLC_SET_ROAM_PREFER 207 +#define WLC_GET_ROAM_PREFER 208 +#define WLC_SET_LED 209 +#define WLC_GET_LED 210 +#define WLC_GET_INTERFERENCE_MODE 211 +#define WLC_SET_INTERFERENCE_MODE 212 +#define WLC_GET_CHANNEL_QA 213 +#define WLC_START_CHANNEL_QA 214 +#define WLC_GET_CHANNEL_SEL 215 +#define WLC_START_CHANNEL_SEL 216 +#define WLC_GET_VALID_CHANNELS 217 +#define WLC_GET_FAKEFRAG 218 +#define WLC_SET_FAKEFRAG 219 +#define WLC_GET_PWROUT_PERCENTAGE 220 +#define WLC_SET_PWROUT_PERCENTAGE 221 +#define WLC_SET_BAD_FRAME_PREEMPT 222 +#define WLC_GET_BAD_FRAME_PREEMPT 223 +#define WLC_SET_LEAP_LIST 224 +#define WLC_GET_LEAP_LIST 225 +#define WLC_GET_CWMIN 226 +#define WLC_SET_CWMIN 227 +#define WLC_GET_CWMAX 
228 +#define WLC_SET_CWMAX 229 +#define WLC_GET_WET 230 +#define WLC_SET_WET 231 +#define WLC_GET_PUB 232 + + +#define WLC_GET_KEY_PRIMARY 235 +#define WLC_SET_KEY_PRIMARY 236 + +#define WLC_GET_ACI_ARGS 238 +#define WLC_SET_ACI_ARGS 239 +#define WLC_UNSET_CALLBACK 240 +#define WLC_SET_CALLBACK 241 +#define WLC_GET_RADAR 242 +#define WLC_SET_RADAR 243 +#define WLC_SET_SPECT_MANAGMENT 244 +#define WLC_GET_SPECT_MANAGMENT 245 +#define WLC_WDS_GET_REMOTE_HWADDR 246 +#define WLC_WDS_GET_WPA_SUP 247 +#define WLC_SET_CS_SCAN_TIMER 248 +#define WLC_GET_CS_SCAN_TIMER 249 +#define WLC_MEASURE_REQUEST 250 +#define WLC_INIT 251 +#define WLC_SEND_QUIET 252 +#define WLC_KEEPALIVE 253 +#define WLC_SEND_PWR_CONSTRAINT 254 +#define WLC_UPGRADE_STATUS 255 +#define WLC_CURRENT_PWR 256 +#define WLC_GET_SCAN_PASSIVE_TIME 257 +#define WLC_SET_SCAN_PASSIVE_TIME 258 +#define WLC_LEGACY_LINK_BEHAVIOR 259 +#define WLC_GET_CHANNELS_IN_COUNTRY 260 +#define WLC_GET_COUNTRY_LIST 261 +#define WLC_GET_VAR 262 +#define WLC_SET_VAR 263 +#define WLC_NVRAM_GET 264 +#define WLC_NVRAM_SET 265 +#define WLC_NVRAM_DUMP 266 +#define WLC_REBOOT 267 +#define WLC_SET_WSEC_PMK 268 +#define WLC_GET_AUTH_MODE 269 +#define WLC_SET_AUTH_MODE 270 +#define WLC_GET_WAKEENTRY 271 +#define WLC_SET_WAKEENTRY 272 +#define WLC_NDCONFIG_ITEM 273 +#define WLC_NVOTPW 274 +#define WLC_OTPW 275 +#define WLC_IOV_BLOCK_GET 276 +#define WLC_IOV_MODULES_GET 277 +#define WLC_SOFT_RESET 278 +#define WLC_GET_ALLOW_MODE 279 +#define WLC_SET_ALLOW_MODE 280 +#define WLC_GET_DESIRED_BSSID 281 +#define WLC_SET_DESIRED_BSSID 282 +#define WLC_DISASSOC_MYAP 283 +#define WLC_GET_NBANDS 284 +#define WLC_GET_BANDSTATES 285 +#define WLC_GET_WLC_BSS_INFO 286 +#define WLC_GET_ASSOC_INFO 287 +#define WLC_GET_OID_PHY 288 +#define WLC_SET_OID_PHY 289 +#define WLC_SET_ASSOC_TIME 290 +#define WLC_GET_DESIRED_SSID 291 +#define WLC_GET_CHANSPEC 292 +#define WLC_GET_ASSOC_STATE 293 +#define WLC_SET_PHY_STATE 294 +#define WLC_GET_SCAN_PENDING 295 +#define WLC_GET_SCANREQ_PENDING 296 +#define WLC_GET_PREV_ROAM_REASON 297 +#define WLC_SET_PREV_ROAM_REASON 298 +#define WLC_GET_BANDSTATES_PI 299 +#define WLC_GET_PHY_STATE 300 +#define WLC_GET_BSS_WPA_RSN 301 +#define WLC_GET_BSS_WPA2_RSN 302 +#define WLC_GET_BSS_BCN_TS 303 +#define WLC_GET_INT_DISASSOC 304 +#define WLC_SET_NUM_PEERS 305 +#define WLC_GET_NUM_BSS 306 +#define WLC_LAST 307 + + + +#define WL_RADIO_SW_DISABLE (1<<0) +#define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) +#define WL_RADIO_COUNTRY_DISABLE (1<<3) + + +#define WL_TXPWR_OVERRIDE (1U<<31) + +#define WL_PHY_PAVARS_LEN 6 + + +#define WL_DIAG_INTERRUPT 1 +#define WL_DIAG_LOOPBACK 2 +#define WL_DIAG_MEMORY 3 +#define WL_DIAG_LED 4 +#define WL_DIAG_REG 5 +#define WL_DIAG_SROM 6 +#define WL_DIAG_DMA 7 + +#define WL_DIAGERR_SUCCESS 0 +#define WL_DIAGERR_FAIL_TO_RUN 1 +#define WL_DIAGERR_NOT_SUPPORTED 2 +#define WL_DIAGERR_INTERRUPT_FAIL 3 +#define WL_DIAGERR_LOOPBACK_FAIL 4 +#define WL_DIAGERR_SROM_FAIL 5 +#define WL_DIAGERR_SROM_BADCRC 6 +#define WL_DIAGERR_REG_FAIL 7 +#define WL_DIAGERR_MEMORY_FAIL 8 +#define WL_DIAGERR_NOMEM 9 +#define WL_DIAGERR_DMA_FAIL 10 + +#define WL_DIAGERR_MEMORY_TIMEOUT 11 +#define WL_DIAGERR_MEMORY_BADPATTERN 12 + + +#define WLC_BAND_AUTO 0 +#define WLC_BAND_5G 1 +#define WLC_BAND_2G 2 +#define WLC_BAND_ALL 3 + + +#define WL_CHAN_FREQ_RANGE_2G 0 +#define WL_CHAN_FREQ_RANGE_5GL 1 +#define WL_CHAN_FREQ_RANGE_5GM 2 +#define WL_CHAN_FREQ_RANGE_5GH 3 + + +#define WLC_PHY_TYPE_A 0 +#define WLC_PHY_TYPE_B 1 +#define 
WLC_PHY_TYPE_G 2 +#define WLC_PHY_TYPE_N 4 +#define WLC_PHY_TYPE_LP 5 +#define WLC_PHY_TYPE_SSN 6 +#define WLC_PHY_TYPE_NULL 0xf + + +#define WLC_MACMODE_DISABLED 0 +#define WLC_MACMODE_DENY 1 +#define WLC_MACMODE_ALLOW 2 + + +#define GMODE_LEGACY_B 0 +#define GMODE_AUTO 1 +#define GMODE_ONLY 2 +#define GMODE_B_DEFERRED 3 +#define GMODE_PERFORMANCE 4 +#define GMODE_LRS 5 +#define GMODE_MAX 6 + + +#define WLC_PLCP_AUTO -1 +#define WLC_PLCP_SHORT 0 +#define WLC_PLCP_LONG 1 + + +#define WLC_PROTECTION_AUTO -1 +#define WLC_PROTECTION_OFF 0 +#define WLC_PROTECTION_ON 1 +#define WLC_PROTECTION_MMHDR_ONLY 2 +#define WLC_PROTECTION_CTS_ONLY 3 + + +#define WLC_PROTECTION_CTL_OFF 0 +#define WLC_PROTECTION_CTL_LOCAL 1 +#define WLC_PROTECTION_CTL_OVERLAP 2 + + +#define WLC_N_PROTECTION_OFF 0 +#define WLC_N_PROTECTION_OPTIONAL 1 +#define WLC_N_PROTECTION_20IN40 2 +#define WLC_N_PROTECTION_MIXEDMODE 3 + + +#define WLC_N_PREAMBLE_MIXEDMODE 0 +#define WLC_N_PREAMBLE_GF 1 + + +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define WLC_N_BW_20IN2G_40IN5G 2 + + +#define WLC_N_TXRX_CHAIN0 0 +#define WLC_N_TXRX_CHAIN1 1 + + +#define WLC_N_SGI_20 0x01 +#define WLC_N_SGI_40 0x02 + + +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 + + +#define INTERFERE_NONE 0 +#define NON_WLAN 1 +#define WLAN_MANUAL 2 +#define WLAN_AUTO 3 +#define AUTO_ACTIVE (1 << 7) + +typedef struct wl_aci_args { + int enter_aci_thresh; + int exit_aci_thresh; + int usec_spin; + int glitch_delay; + uint16 nphy_adcpwr_enter_thresh; + uint16 nphy_adcpwr_exit_thresh; + uint16 nphy_repeat_ctr; + uint16 nphy_num_samples; + uint16 nphy_undetect_window_sz; + uint16 nphy_b_energy_lo_aci; + uint16 nphy_b_energy_md_aci; + uint16 nphy_b_energy_hi_aci; +} wl_aci_args_t; + +#define WL_ACI_ARGS_LEGACY_LENGTH 16 + + + +#define WL_ERROR_VAL 0x00000001 +#define WL_TRACE_VAL 0x00000002 +#define WL_PRHDRS_VAL 0x00000004 +#define WL_PRPKT_VAL 0x00000008 +#define WL_INFORM_VAL 0x00000010 +#define WL_TMP_VAL 0x00000020 +#define WL_OID_VAL 0x00000040 +#define WL_RATE_VAL 0x00000080 +#define WL_ASSOC_VAL 0x00000100 +#define WL_PRUSR_VAL 0x00000200 +#define WL_PS_VAL 0x00000400 +#define WL_TXPWR_VAL 0x00000800 +#define WL_PORT_VAL 0x00001000 +#define WL_DUAL_VAL 0x00002000 +#define WL_WSEC_VAL 0x00004000 +#define WL_WSEC_DUMP_VAL 0x00008000 +#define WL_LOG_VAL 0x00010000 +#define WL_NRSSI_VAL 0x00020000 +#define WL_LOFT_VAL 0x00040000 +#define WL_REGULATORY_VAL 0x00080000 +#define WL_PHYCAL_VAL 0x00100000 +#define WL_RADAR_VAL 0x00200000 +#define WL_MPC_VAL 0x00400000 +#define WL_APSTA_VAL 0x00800000 +#define WL_DFS_VAL 0x01000000 +#define WL_BA_VAL 0x02000000 +#define WL_MBSS_VAL 0x04000000 +#define WL_CAC_VAL 0x08000000 +#define WL_AMSDU_VAL 0x10000000 +#define WL_AMPDU_VAL 0x20000000 +#define WL_FFPLD_VAL 0x40000000 + + +#define WL_DPT_VAL 0x00000001 +#define WL_SCAN_VAL 0x00000002 +#define WL_WOWL_VAL 0x00000004 +#define WL_COEX_VAL 0x00000008 +#define WL_RTDC_VAL 0x00000010 +#define WL_BTA_VAL 0x00000040 + + +#define WL_LED_NUMGPIO 16 + + +#define WL_LED_OFF 0 +#define WL_LED_ON 1 +#define WL_LED_ACTIVITY 2 +#define WL_LED_RADIO 3 +#define WL_LED_ARADIO 4 +#define WL_LED_BRADIO 5 +#define WL_LED_BGMODE 6 +#define WL_LED_WI1 7 +#define WL_LED_WI2 8 +#define WL_LED_WI3 9 +#define WL_LED_ASSOC 10 +#define WL_LED_INACTIVE 11 +#define WL_LED_ASSOCACT 12 +#define WL_LED_NUMBEHAVIOR 13 + + +#define WL_LED_BEH_MASK 0x7f +#define WL_LED_AL_MASK 0x80 + + +#define WL_NUMCHANNELS 64 +#define WL_NUMCHANSPECS 100 + + +#define WL_WDS_WPA_ROLE_AUTH 0 +#define 
WL_WDS_WPA_ROLE_SUP 1 +#define WL_WDS_WPA_ROLE_AUTO 255 + + +#define WL_EVENTING_MASK_LEN 16 + + +#define VNDR_IE_CMD_LEN 4 + + +#define VNDR_IE_BEACON_FLAG 0x1 +#define VNDR_IE_PRBRSP_FLAG 0x2 +#define VNDR_IE_ASSOCRSP_FLAG 0x4 +#define VNDR_IE_AUTHRSP_FLAG 0x8 +#define VNDR_IE_PRBREQ_FLAG 0x10 +#define VNDR_IE_ASSOCREQ_FLAG 0x20 +#define VNDR_IE_CUSTOM_FLAG 0x100 + +#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) + +typedef struct { + uint32 pktflag; + vndr_ie_t vndr_ie_data; +} vndr_ie_info_t; + +typedef struct { + int iecount; + vndr_ie_info_t vndr_ie_list[1]; +} vndr_ie_buf_t; + +typedef struct { + char cmd[VNDR_IE_CMD_LEN]; + vndr_ie_buf_t vndr_ie_buffer; +} vndr_ie_setbuf_t; + + + + +#define WL_JOIN_PREF_RSSI 1 +#define WL_JOIN_PREF_WPA 2 +#define WL_JOIN_PREF_BAND 3 + + +#define WLJP_BAND_ASSOC_PREF 255 + + +#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" + +struct tsinfo_arg { + uint8 octets[3]; +}; + + +#define NFIFO 6 + +#define WL_CNT_T_VERSION 5 +#define WL_CNT_EXT_T_VERSION 1 + +typedef struct { + uint16 version; + uint16 length; + + + uint32 txframe; + uint32 txbyte; + uint32 txretrans; + uint32 txerror; + uint32 txctl; + uint32 txprshort; + uint32 txserr; + uint32 txnobuf; + uint32 txnoassoc; + uint32 txrunt; + uint32 txchit; + uint32 txcmiss; + + + uint32 txuflo; + uint32 txphyerr; + uint32 txphycrs; + + + uint32 rxframe; + uint32 rxbyte; + uint32 rxerror; + uint32 rxctl; + uint32 rxnobuf; + uint32 rxnondata; + uint32 rxbadds; + uint32 rxbadcm; + uint32 rxfragerr; + uint32 rxrunt; + uint32 rxgiant; + uint32 rxnoscb; + uint32 rxbadproto; + uint32 rxbadsrcmac; + uint32 rxbadda; + uint32 rxfilter; + + + uint32 rxoflo; + uint32 rxuflo[NFIFO]; + + uint32 d11cnt_txrts_off; + uint32 d11cnt_rxcrc_off; + uint32 d11cnt_txnocts_off; + + + uint32 dmade; + uint32 dmada; + uint32 dmape; + uint32 reset; + uint32 tbtt; + uint32 txdmawar; + uint32 pkt_callback_reg_fail; + + + uint32 txallfrm; + uint32 txrtsfrm; + uint32 txctsfrm; + uint32 txackfrm; + uint32 txdnlfrm; + uint32 txbcnfrm; + uint32 txfunfl[8]; + uint32 txtplunfl; + uint32 txphyerror; + uint32 rxfrmtoolong; + uint32 rxfrmtooshrt; + uint32 rxinvmachdr; + uint32 rxbadfcs; + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 rxstrt; + uint32 rxdfrmucastmbss; + uint32 rxmfrmucastmbss; + uint32 rxcfrmucast; + uint32 rxrtsucast; + uint32 rxctsucast; + uint32 rxackucast; + uint32 rxdfrmocast; + uint32 rxmfrmocast; + uint32 rxcfrmocast; + uint32 rxrtsocast; + uint32 rxctsocast; + uint32 rxdfrmmcast; + uint32 rxmfrmmcast; + uint32 rxcfrmmcast; + uint32 rxbeaconmbss; + uint32 rxdfrmucastobss; + uint32 rxbeaconobss; + uint32 rxrsptmout; + uint32 bcntxcancl; + uint32 rxf0ovfl; + uint32 rxf1ovfl; + uint32 rxf2ovfl; + uint32 txsfovfl; + uint32 pmqovfl; + uint32 rxcgprqfrm; + uint32 rxcgprsqovfl; + uint32 txcgprsfail; + uint32 txcgprssuc; + uint32 prs_timeout; + uint32 rxnack; + uint32 frmscons; + uint32 txnack; + uint32 txglitch_nack; + uint32 txburst; + + + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 txretry; + uint32 txretrie; + uint32 rxdup; + uint32 txrts; + uint32 txnocts; + uint32 txnoack; + uint32 rxfrag; + uint32 rxmulti; + uint32 rxcrc; + uint32 txfrmsnt; + uint32 rxundec; + + + uint32 tkipmicfaill; + uint32 tkipcntrmsr; + uint32 tkipreplay; + uint32 ccmpfmterr; + uint32 ccmpreplay; + uint32 ccmpundec; + uint32 fourwayfail; + uint32 wepundec; + uint32 wepicverr; + uint32 decsuccess; + uint32 tkipicverr; + uint32 wepexcluded; + + uint32 txchanrej; + uint32 psmwds; + uint32 phywatchdog; + + + uint32 prq_entries_handled; + 
uint32 prq_undirected_entries; + uint32 prq_bad_entries; + uint32 atim_suppress_count; + uint32 bcn_template_not_ready; + uint32 bcn_template_not_ready_done; + uint32 late_tbtt_dpc; + + + uint32 rx1mbps; + uint32 rx2mbps; + uint32 rx5mbps5; + uint32 rx6mbps; + uint32 rx9mbps; + uint32 rx11mbps; + uint32 rx12mbps; + uint32 rx18mbps; + uint32 rx24mbps; + uint32 rx36mbps; + uint32 rx48mbps; + uint32 rx54mbps; + uint32 rx108mbps; + uint32 rx162mbps; + uint32 rx216mbps; + uint32 rx270mbps; + uint32 rx324mbps; + uint32 rx378mbps; + uint32 rx432mbps; + uint32 rx486mbps; + uint32 rx540mbps; + + uint32 pktengrxducast; + uint32 pktengrxdmcast; +} wl_cnt_t; + +typedef struct { + uint16 version; + uint16 length; + + uint32 rxampdu_sgi; + uint32 rxampdu_stbc; + uint32 rxmpdu_sgi; + uint32 rxmpdu_stbc; + uint32 rxmcs0_40M; + uint32 rxmcs1_40M; + uint32 rxmcs2_40M; + uint32 rxmcs3_40M; + uint32 rxmcs4_40M; + uint32 rxmcs5_40M; + uint32 rxmcs6_40M; + uint32 rxmcs7_40M; + uint32 rxmcs32_40M; + + uint32 txfrmsnt_20Mlo; + uint32 txfrmsnt_20Mup; + uint32 txfrmsnt_40M; + + uint32 rx_20ul; +} wl_cnt_ext_t; + +#define WL_RXDIV_STATS_T_VERSION 1 +typedef struct { + uint16 version; + uint16 length; + + uint32 rxant[4]; +} wl_rxdiv_stats_t; + +#define WL_DELTA_STATS_T_VERSION 1 + +typedef struct { + uint16 version; + uint16 length; + + + uint32 txframe; + uint32 txbyte; + uint32 txretrans; + uint32 txfail; + + + uint32 rxframe; + uint32 rxbyte; + + + uint32 rx1mbps; + uint32 rx2mbps; + uint32 rx5mbps5; + uint32 rx6mbps; + uint32 rx9mbps; + uint32 rx11mbps; + uint32 rx12mbps; + uint32 rx18mbps; + uint32 rx24mbps; + uint32 rx36mbps; + uint32 rx48mbps; + uint32 rx54mbps; + uint32 rx108mbps; + uint32 rx162mbps; + uint32 rx216mbps; + uint32 rx270mbps; + uint32 rx324mbps; + uint32 rx378mbps; + uint32 rx432mbps; + uint32 rx486mbps; + uint32 rx540mbps; +} wl_delta_stats_t; + +#define WL_WME_CNT_VERSION 1 + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; + uint16 length; + + wl_traffic_stats_t tx[AC_COUNT]; + wl_traffic_stats_t tx_failed[AC_COUNT]; + wl_traffic_stats_t rx[AC_COUNT]; + wl_traffic_stats_t rx_failed[AC_COUNT]; + + wl_traffic_stats_t forward[AC_COUNT]; + + wl_traffic_stats_t tx_expired[AC_COUNT]; + +} wl_wme_cnt_t; + + + +#define WLC_ROAM_TRIGGER_DEFAULT 0 +#define WLC_ROAM_TRIGGER_BANDWIDTH 1 +#define WLC_ROAM_TRIGGER_DISTANCE 2 +#define WLC_ROAM_TRIGGER_MAX_VALUE 2 + + +enum { + PFN_LIST_ORDER, + PFN_RSSI +}; + +enum { + DISABLE, + ENABLE +}; + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 + +#define SORT_CRITERIA_MASK 0x01 +#define AUTO_NET_SWITCH_MASK 0x02 +#define ENABLE_BKGRD_SCAN_MASK 0x04 +#define IMMEDIATE_SCAN_MASK 0x08 +#define AUTO_CONNECT_MASK 0x10 + +#define PFN_VERSION 1 + +#define MAX_PFN_LIST_COUNT 16 + + +typedef struct wl_pfn_param { + int32 version; + int32 scan_freq; + int32 lost_network_timeout; + int16 flags; + int16 rssi_margin; +} wl_pfn_param_t; + +typedef struct wl_pfn { + wlc_ssid_t ssid; + int32 bss_type; + int32 infra; + int32 auth; + uint32 wpa_auth; + int32 wsec; +#ifdef WLPFN_AUTO_CONNECT + union { + wl_wsec_key_t sec_key; + wsec_pmk_t wpa_sec_key; + } pfn_security; +#endif +} wl_pfn_t; + + +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + + +#define TOE_ERRTEST_TX_CSUM 0x00000001 +#define TOE_ERRTEST_RX_CSUM 0x00000002 +#define TOE_ERRTEST_RX_CSUM2 0x00000004 + +struct toe_ol_stats_t { + + 
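+	/* Checksum-offload (TOE) statistics: frames checksummed on transmit,
+	 * per-protocol header-checksum fills, good/bad receive checksums and
+	 * the error-injection test counters. */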
uint32 tx_summed; + + + uint32 tx_iph_fill; + uint32 tx_tcp_fill; + uint32 tx_udp_fill; + uint32 tx_icmp_fill; + + + uint32 rx_iph_good; + uint32 rx_iph_bad; + uint32 rx_tcp_good; + uint32 rx_tcp_bad; + uint32 rx_udp_good; + uint32 rx_udp_bad; + uint32 rx_icmp_good; + uint32 rx_icmp_bad; + + + uint32 tx_tcp_errinj; + uint32 tx_udp_errinj; + uint32 tx_icmp_errinj; + + + uint32 rx_tcp_errinj; + uint32 rx_udp_errinj; + uint32 rx_icmp_errinj; +}; + + +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 + + +#define ARP_ERRTEST_REPLY_PEER 0x1 +#define ARP_ERRTEST_REPLY_HOST 0x2 + +#define ARP_MULTIHOMING_MAX 8 + + +struct arp_ol_stats_t { + uint32 host_ip_entries; + uint32 host_ip_overflow; + + uint32 arp_table_entries; + uint32 arp_table_overflow; + + uint32 host_request; + uint32 host_reply; + uint32 host_service; + + uint32 peer_request; + uint32 peer_request_drop; + uint32 peer_reply; + uint32 peer_reply_drop; + uint32 peer_service; +}; + + + + + +typedef struct wl_keep_alive_pkt { + uint32 period_msec; + uint16 len_bytes; + uint8 data[1]; +} wl_keep_alive_pkt_t; + +#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data) + + + + + +typedef enum wl_pkt_filter_type { + WL_PKT_FILTER_TYPE_PATTERN_MATCH +} wl_pkt_filter_type_t; + +#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t + + +typedef struct wl_pkt_filter_pattern { + uint32 offset; + uint32 size_bytes; + uint8 mask_and_pattern[1]; +} wl_pkt_filter_pattern_t; + + +typedef struct wl_pkt_filter { + uint32 id; + uint32 type; + uint32 negate_match; + union { + wl_pkt_filter_pattern_t pattern; + } u; +} wl_pkt_filter_t; + +#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u) +#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern) + + +typedef struct wl_pkt_filter_enable { + uint32 id; + uint32 enable; +} wl_pkt_filter_enable_t; + + +typedef struct wl_pkt_filter_list { + uint32 num; + wl_pkt_filter_t filter[1]; +} wl_pkt_filter_list_t; + +#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) + + +typedef struct wl_pkt_filter_stats { + uint32 num_pkts_matched; + uint32 num_pkts_forwarded; + uint32 num_pkts_discarded; +} wl_pkt_filter_stats_t; + + +typedef struct wl_seq_cmd_ioctl { + uint32 cmd; + uint32 len; +} wl_seq_cmd_ioctl_t; + +#define WL_SEQ_CMD_ALIGN_BYTES 4 + + +#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \ + (((cmd) == WLC_GET_MAGIC) || \ + ((cmd) == WLC_GET_VERSION) || \ + ((cmd) == WLC_GET_AP) || \ + ((cmd) == WLC_GET_INSTANCE)) + + + +#define WL_PKTENG_PER_TX_START 0x01 +#define WL_PKTENG_PER_TX_STOP 0x02 +#define WL_PKTENG_PER_RX_START 0x04 +#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 +#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 +#define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_MASK 0xff + +#define WL_PKTENG_SYNCHRONOUS 0x100 + +typedef struct wl_pkteng { + uint32 flags; + uint32 delay; + uint32 nframes; + uint32 length; + uint8 seqno; + struct ether_addr dest; + struct ether_addr src; +} wl_pkteng_t; + +#define NUM_80211b_RATES 4 +#define NUM_80211ag_RATES 8 +#define NUM_80211n_RATES 32 +#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) +typedef struct wl_pkteng_stats { + uint32 lostfrmcnt; + int32 rssi; + int32 snr; + uint16 rxpktcnt[NUM_80211_RATES+1]; +} wl_pkteng_stats_t; + +#define WL_WOWL_MAGIC (1 << 0) +#define WL_WOWL_NET (1 << 1) +#define WL_WOWL_DIS (1 << 2) +#define WL_WOWL_RETR (1 << 3) +#define WL_WOWL_BCN 
(1 << 4) +#define WL_WOWL_TST (1 << 5) +#define WL_WOWL_BCAST (1 << 15) + +#define MAGIC_PKT_MINLEN 102 + +typedef struct { + uint masksize; + uint offset; + uint patternoffset; + uint patternsize; + + +} wl_wowl_pattern_t; + +typedef struct { + uint count; + wl_wowl_pattern_t pattern[1]; +} wl_wowl_pattern_list_t; + +typedef struct { + uint8 pci_wakeind; + uint16 ucode_wakeind; +} wl_wowl_wakeind_t; + + +typedef struct wl_txrate_class { + uint8 init_rate; + uint8 min_rate; + uint8 max_rate; +} wl_txrate_class_t; + + + + +#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 100 +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 20 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 + + +typedef struct wl_obss_scan_arg { + int16 passive_dwell; + int16 active_dwell; + int16 bss_widthscan_interval; + int16 passive_total; + int16 active_total; + int16 chanwidth_transition_delay; + int16 activity_threshold; +} wl_obss_scan_arg_t; +#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) +#define WL_MIN_NUM_OBSS_SCAN_ARG 7 + +#define WL_COEX_INFO_MASK 0x07 +#define WL_COEX_INFO_REQ 0x01 +#define WL_COEX_40MHZ_INTOLERANT 0x02 +#define WL_COEX_WIDTH20 0x04 + +typedef struct wl_action_obss_coex_req { + uint8 info; + uint8 num; + uint8 ch_list[1]; +} wl_action_obss_coex_req_t; + + +#define MAX_RSSI_LEVELS 8 + + +typedef struct wl_rssi_event { + + uint32 rate_limit_msec; + + uint8 num_rssi_levels; + + int8 rssi_levels[MAX_RSSI_LEVELS]; +} wl_rssi_event_t; + + + +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + + + +#include <packed_section_end.h> + + +#include <packed_section_start.h> + + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { + struct ether_addr staAddr; + uint16 ieLen; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { + sta_prbreq_wps_ie_hdr_t hdr; + uint8 ieData[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { + uint32 totLen; + uint8 ieDataList[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; + + +#include <packed_section_end.h> + +#endif diff --git a/drivers/net/wireless/bcm4329/linux_osl.c b/drivers/net/wireless/bcm4329/linux_osl.c new file mode 100644 index 000000000000..d00bd1ca291b 
--- /dev/null +++ b/drivers/net/wireless/bcm4329/linux_osl.c @@ -0,0 +1,622 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: linux_osl.c,v 1.125.12.3.8.7 2010/05/04 21:10:04 Exp $ + */ + + +#define LINUX_OSL + +#include <typedefs.h> +#include <bcmendian.h> +#include <linuxver.h> +#include <bcmdefs.h> +#include <osl.h> +#include <bcmutils.h> +#include <linux/delay.h> +#include <pcicfg.h> + +#define PCI_CFG_RETRY 10 + +#define OS_HANDLE_MAGIC 0x1234abcd +#define BCM_MEM_FILENAME_LEN 24 + +#ifdef DHD_USE_STATIC_BUF +#define MAX_STATIC_BUF_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) +#define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE) +typedef struct bcm_static_buf { + struct semaphore static_sem; + unsigned char *buf_ptr; + unsigned char buf_use[MAX_STATIC_BUF_NUM]; +} bcm_static_buf_t; + +static bcm_static_buf_t *bcm_static_buf = 0; + +#define MAX_STATIC_PKT_NUM 8 +typedef struct bcm_static_pkt { + struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM]; + struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM]; + struct semaphore osl_pkt_sem; + unsigned char pkt_use[MAX_STATIC_PKT_NUM*2]; +} bcm_static_pkt_t; +static bcm_static_pkt_t *bcm_static_skb = 0; + +#endif +typedef struct bcm_mem_link { + struct bcm_mem_link *prev; + struct bcm_mem_link *next; + uint size; + int line; + char file[BCM_MEM_FILENAME_LEN]; +} bcm_mem_link_t; + +struct osl_info { + osl_pubinfo_t pub; + uint magic; + void *pdev; + uint malloced; + uint failed; + uint bustype; + bcm_mem_link_t *dbgmem_list; +}; + +static int16 linuxbcmerrormap[] = +{ 0, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -E2BIG, + -E2BIG, + -EBUSY, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EFAULT, + -ENOMEM, + -EOPNOTSUPP, + -EMSGSIZE, + -EINVAL, + -EPERM, + -ENOMEM, + -EINVAL, + -ERANGE, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EIO, + -ENODEV, + -EINVAL, + -EIO, + -EIO, + -EINVAL, + -EINVAL, + + + +#if BCME_LAST != -41 +#error "You need to add a OS error translation in the linuxbcmerrormap \ + for new error code defined in bcmutils.h" +#endif +}; + + +int +osl_error(int bcmerror) +{ + if (bcmerror > 0) + bcmerror = 0; + else if (bcmerror < BCME_LAST) + bcmerror = BCME_ERROR; + + + return linuxbcmerrormap[-bcmerror]; +} + +void * dhd_os_prealloc(int section, unsigned long size); +osl_t * +osl_attach(void *pdev, 
uint bustype, bool pkttag) +{ + osl_t *osh; + gfp_t flags; + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + osh = kmalloc(sizeof(osl_t), flags); + ASSERT(osh); + + bzero(osh, sizeof(osl_t)); + + + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); + + osh->magic = OS_HANDLE_MAGIC; + osh->malloced = 0; + osh->failed = 0; + osh->dbgmem_list = NULL; + osh->pdev = pdev; + osh->pub.pkttag = pkttag; + osh->bustype = bustype; + + switch (bustype) { + case PCI_BUS: + case SI_BUS: + case PCMCIA_BUS: + osh->pub.mmbus = TRUE; + break; + case JTAG_BUS: + case SDIO_BUS: + case USB_BUS: + case SPI_BUS: + osh->pub.mmbus = FALSE; + break; + default: + ASSERT(FALSE); + break; + } + +#ifdef DHD_USE_STATIC_BUF + + + if (!bcm_static_buf) { + if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE+ + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + } + else { + /* printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); */ + } + + init_MUTEX(&bcm_static_buf->static_sem); + + + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; + + } + + if (!bcm_static_skb) + { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = dhd_os_prealloc(4, 0); + + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16); + for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++) + bcm_static_skb->pkt_use[i] = 0; + + init_MUTEX(&bcm_static_skb->osl_pkt_sem); + } +#endif + return osh; +} + +void +osl_detach(osl_t *osh) +{ + if (osh == NULL) + return; + +#ifdef DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif + ASSERT(osh->magic == OS_HANDLE_MAGIC); + kfree(osh); +} + + +void* +osl_pktget(osl_t *osh, uint len) +{ + struct sk_buff *skb; + + if ((skb = dev_alloc_skb(len))) { + skb_put(skb, len); + skb->priority = 0; + + + osh->pub.pktalloced++; + } + + return ((void*) skb); +} + + +void +osl_pktfree(osl_t *osh, void *p, bool send) +{ + struct sk_buff *skb, *nskb; + + skb = (struct sk_buff*) p; + + if (send && osh->pub.tx_fn) + osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); + + + while (skb) { + nskb = skb->next; + skb->next = NULL; + + + if (skb->destructor) { + + dev_kfree_skb_any(skb); + } else { + + dev_kfree_skb(skb); + } + + osh->pub.pktalloced--; + + skb = nskb; + } +} + +#ifdef DHD_USE_STATIC_BUF +void* +osl_pktget_static(osl_t *osh, uint len) +{ + int i = 0; + struct sk_buff *skb; + + + if (len > (PAGE_SIZE*2)) + { + printk("Do we really need this big skb??\n"); + return osl_pktget(osh, len); + } + + + down(&bcm_static_skb->osl_pkt_sem); + if (len <= PAGE_SIZE) + { + + for (i = 0; i < MAX_STATIC_PKT_NUM; i++) + { + if (bcm_static_skb->pkt_use[i] == 0) + break; + } + + if (i != MAX_STATIC_PKT_NUM) + { + bcm_static_skb->pkt_use[i] = 1; + up(&bcm_static_skb->osl_pkt_sem); + + skb = bcm_static_skb->skb_4k[i]; + skb->tail = skb->data + len; + skb->len = len; + + return skb; + } + } + + + for (i = 0; i < MAX_STATIC_PKT_NUM; i++) + { + if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0) + break; + } + + if (i != MAX_STATIC_PKT_NUM) + { + bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1; + up(&bcm_static_skb->osl_pkt_sem); + skb = bcm_static_skb->skb_8k[i]; + skb->tail = skb->data + len; + skb->len = len; + + return skb; + } + + + + up(&bcm_static_skb->osl_pkt_sem); + printk("all static pkt in use!\n"); + return osl_pktget(osh, len); +} + + +void +osl_pktfree_static(osl_t *osh, void *p, bool send) +{ + int i; 
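+	/* If the skb came from the static 4k/8k pool, just mark its slot free
+	 * and return; anything else falls through to the regular osl_pktfree()
+	 * below. */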
+ + for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++) + { + if (p == bcm_static_skb->skb_4k[i]) + { + down(&bcm_static_skb->osl_pkt_sem); + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + + + return; + } + } + return osl_pktfree(osh, p, send); +} +#endif +uint32 +osl_pci_read_config(osl_t *osh, uint offset, uint size) +{ + uint val = 0; + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + + ASSERT(size == 4); + + do { + pci_read_config_dword(osh->pdev, offset, &val); + if (val != 0xffffffff) + break; + } while (retry--); + + + return (val); +} + +void +osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) +{ + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + + ASSERT(size == 4); + + do { + pci_write_config_dword(osh->pdev, offset, val); + if (offset != PCI_BAR0_WIN) + break; + if (osl_pci_read_config(osh, offset, size) == val) + break; + } while (retry--); + +} + + +uint +osl_pci_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return ((struct pci_dev *)osh->pdev)->bus->number; +} + + +uint +osl_pci_slot(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); +} + +static void +osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) +{ +} + +void +osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); +} + +void +osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); +} + + + +void* +osl_malloc(osl_t *osh, uint size) +{ + void *addr; + gfp_t flags; + + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); + +#ifdef DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + int i = 0; + if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) + { + down(&bcm_static_buf->static_sem); + + for (i = 0; i < MAX_STATIC_BUF_NUM; i++) + { + if (bcm_static_buf->buf_use[i] == 0) + break; + } + + if (i == MAX_STATIC_BUF_NUM) + { + up(&bcm_static_buf->static_sem); + printk("all static buff in use!\n"); + goto original; + } + + bcm_static_buf->buf_use[i] = 1; + up(&bcm_static_buf->static_sem); + + bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); + if (osh) + osh->malloced += size; + + return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); + } + } +original: +#endif + flags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; + if ((addr = kmalloc(size, flags)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh) + osh->malloced += size; + + return (addr); +} + +void +osl_mfree(osl_t *osh, void *addr, uint size) +{ +#ifdef DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr + <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) + { + int buf_idx = 0; + + buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; + + down(&bcm_static_buf->static_sem); + bcm_static_buf->buf_use[buf_idx] = 0; + up(&bcm_static_buf->static_sem); + + if (osh) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + osh->malloced -= size; + } + return; + } + } +#endif + if (osh) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + osh->malloced -= size; + } + kfree(addr); +} + +uint +osl_malloced(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->malloced); +} + +uint +osl_malloc_failed(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->failed); +} + +void* +osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap)); +} + +void +osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +} + +uint +osl_dma_map(osl_t *osh, void *va, uint size, int direction) +{ + int dir; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + return (pci_map_single(osh->pdev, va, size, dir)); +} + +void +osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction) +{ + int dir; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + pci_unmap_single(osh->pdev, (uint32)pa, size, dir); +} + + +void +osl_delay(uint usec) +{ + uint d; + + while (usec > 0) { + d = MIN(usec, 1000); + udelay(d); + usec -= d; + } +} + + + +void * +osl_pktdup(osl_t *osh, void *skb) +{ + void * p; + gfp_t flags; + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + if ((p = skb_clone((struct sk_buff*)skb, flags)) == NULL) + return NULL; + + + if (osh->pub.pkttag) + bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ); + + + osh->pub.pktalloced++; + return (p); +} diff --git a/drivers/net/wireless/bcm4329/miniopt.c b/drivers/net/wireless/bcm4329/miniopt.c new file mode 100644 index 000000000000..6a184a75f06b --- /dev/null +++ b/drivers/net/wireless/bcm4329/miniopt.c @@ -0,0 +1,163 @@ +/* + * Description. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: miniopt.c,v 1.1.6.4 2009/09/25 00:32:01 Exp $ + */ + +/* ---- Include Files ---------------------------------------------------- */ + +#include <typedefs.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "miniopt.h" + + +/* ---- Public Variables ------------------------------------------------- */ +/* ---- Private Constants and Types -------------------------------------- */ + + + +/* ---- Private Variables ------------------------------------------------ */ +/* ---- Private Function Prototypes -------------------------------------- */ +/* ---- Functions -------------------------------------------------------- */ + +/* ----------------------------------------------------------------------- */ +void +miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags) +{ + static const char *null_flags = ""; + + memset(t, 0, sizeof(miniopt_t)); + t->name = name; + if (flags == NULL) + t->flags = null_flags; + else + t->flags = flags; + t->longflags = longflags; +} + + +/* ----------------------------------------------------------------------- */ +int +miniopt(miniopt_t *t, char **argv) +{ + int keylen; + char *p, *eq, *valstr, *endptr = NULL; + int err = 0; + + t->consumed = 0; + t->positional = FALSE; + memset(t->key, 0, MINIOPT_MAXKEY); + t->opt = '\0'; + t->valstr = NULL; + t->good_int = FALSE; + valstr = NULL; + + if (*argv == NULL) { + err = -1; + goto exit; + } + + p = *argv++; + t->consumed++; + + if (!t->opt_end && !strcmp(p, "--")) { + t->opt_end = TRUE; + if (*argv == NULL) { + err = -1; + goto exit; + } + p = *argv++; + t->consumed++; + } + + if (t->opt_end) { + t->positional = TRUE; + valstr = p; + } + else if (!strncmp(p, "--", 2)) { + eq = strchr(p, '='); + if (eq == NULL && !t->longflags) { + fprintf(stderr, + "%s: missing \" = \" in long param \"%s\"\n", t->name, p); + err = 1; + goto exit; + } + keylen = eq ? 
(eq - (p + 2)) : (int)strlen(p) - 2; + if (keylen > 63) keylen = 63; + memcpy(t->key, p + 2, keylen); + + if (eq) { + valstr = eq + 1; + if (*valstr == '\0') { + fprintf(stderr, + "%s: missing value after \" = \" in long param \"%s\"\n", + t->name, p); + err = 1; + goto exit; + } + } + } + else if (!strncmp(p, "-", 1)) { + t->opt = p[1]; + if (strlen(p) > 2) { + fprintf(stderr, + "%s: only single char options, error on param \"%s\"\n", + t->name, p); + err = 1; + goto exit; + } + if (strchr(t->flags, t->opt)) { + /* this is a flag option, no value expected */ + valstr = NULL; + } else { + if (*argv == NULL) { + fprintf(stderr, + "%s: missing value parameter after \"%s\"\n", t->name, p); + err = 1; + goto exit; + } + valstr = *argv; + argv++; + t->consumed++; + } + } else { + t->positional = TRUE; + valstr = p; + } + + /* parse valstr as int just in case */ + if (valstr) { + t->uval = (uint)strtoul(valstr, &endptr, 0); + t->val = (int)t->uval; + t->good_int = (*endptr == '\0'); + } + + t->valstr = valstr; + +exit: + if (err == 1) + t->opt = '?'; + + return err; +} diff --git a/drivers/net/wireless/bcm4329/sbutils.c b/drivers/net/wireless/bcm4329/sbutils.c new file mode 100644 index 000000000000..46cd51010b78 --- /dev/null +++ b/drivers/net/wireless/bcm4329/sbutils.c @@ -0,0 +1,1004 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbutils.c,v 1.662.4.10.2.7.4.2 2010/04/19 05:48:48 Exp $ + */ + +#include <typedefs.h> +#include <bcmdefs.h> +#include <osl.h> +#include <bcmutils.h> +#include <siutils.h> +#include <bcmdevs.h> +#include <hndsoc.h> +#include <sbchipc.h> +#include <pcicfg.h> +#include <sbpcmcia.h> + +#include "siutils_priv.h" + +/* local prototypes */ +static uint _sb_coreidx(si_info_t *sii, uint32 sba); +static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, + uint ncores); +static uint32 _sb_coresba(si_info_t *sii); +static void *_sb_setcoreidx(si_info_t *sii, uint coreidx); + +#define SET_SBREG(sii, r, mask, val) \ + W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val))) +#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF) + +/* sonicsrev */ +#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT) +#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT) + +#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr)) +#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v)) +#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v))) +#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v))) + +static uint32 +sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) +{ + uint8 tmp; + uint32 val, intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). + * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + val = R_REG(sii->osh, sbr); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } + + return (val); +} + +static void +sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v) +{ + uint8 tmp; + volatile uint32 dummy; + uint32 intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). 
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) { +#ifdef IL_BIGENDIAN + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); +#else + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); +#endif /* IL_BIGENDIAN */ + } else + W_REG(sii->osh, sbr, v); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } +} + +uint +sb_coreid(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT); +} + +uint +sb_flag(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK; +} + +void +sb_setint(si_t *sih, int siflag) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 vec; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + if (siflag == -1) + vec = 0; + else + vec = 1 << siflag; + W_SBREG(sii, &sb->sbintvec, vec); +} + +/* return core index of the core with address 'sba' */ +static uint +_sb_coreidx(si_info_t *sii, uint32 sba) +{ + uint i; + + for (i = 0; i < sii->numcores; i ++) + if (sba == sii->common_info->coresba[i]) + return i; + return BADIDX; +} + +/* return core address of the current core */ +static uint32 +_sb_coresba(si_info_t *sii) +{ + uint32 sbaddr; + + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: { + sbconfig_t *sb = REGS2SB(sii->curmap); + sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0)); + break; + } + + case PCI_BUS: + sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + break; + + case PCMCIA_BUS: { + uint8 tmp = 0; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + sbaddr = (uint32)tmp << 12; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + sbaddr |= (uint32)tmp << 16; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + sbaddr |= (uint32)tmp << 24; + break; + } + + case SPI_BUS: + case SDIO_BUS: + sbaddr = (uint32)(uintptr)sii->curmap; + break; + + + default: + sbaddr = BADCOREADDR; + break; + } + + return sbaddr; +} + +uint +sb_corevendor(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT); +} + +uint +sb_corerev(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + uint sbidh; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + sbidh = R_SBREG(sii, &sb->sbidhigh); + + return (SBCOREREV(sbidh)); +} + +/* set core-specific control flags */ +void +sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); +} + +/* set/clear core-specific control flags */ +uint32 
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); + } + + /* return the new value + * for write operation, the following readback ensures the completion of write opration. + */ + return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT); +} + +/* set/clear core-specific status flags */ +uint32 +sb_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) | + (val << SBTMH_SISF_SHIFT); + W_SBREG(sii, &sb->sbtmstatehigh, w); + } + + /* return the new value */ + return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT); +} + +bool +sb_iscoreup(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbtmstatelow) & + (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) == + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fidleing with interrupts or core switches are needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. + */ +uint +sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii; + + sii = SI_INFO(sih); + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!sii->common_info->regs[coreidx]) { + sii->common_info->regs[coreidx] = + REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE); + ASSERT(GOODREGS(sii->common_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + if (regoff >= SBCONFIGOFF) { + w = (R_SBREG(sii, r) & ~mask) | val; + W_SBREG(sii, r, w); + } else { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + } + + /* readback */ + if (regoff >= SBCONFIGOFF) + w = R_SBREG(sii, r); + else { + if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) && + (coreidx == SI_CC_IDX) && + (regoff == OFFSETOF(chipcregs_t, watchdog))) { + w = val; + } else + w = R_REG(sii->osh, r); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + sb_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* Scan the enumeration space to find all cores starting from the given + * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba' + * is the default core address at chip POR time and 'regs' is the virtual + * address that the default core is mapped at. 'ncores' is the number of + * cores expected on bus 'sbba'. It returns the total number of cores + * starting from bus 'sbba', inclusive. + */ +#define SB_MAXBUSES 2 +static uint +_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores) +{ + uint next; + uint ncc = 0; + uint i; + + if (bus >= SB_MAXBUSES) { + SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus)); + return 0; + } + SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores)); + + /* Scan all cores on the bus starting from core 0. + * Core addresses must be contiguous on each bus. + */ + for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) { + sii->common_info->coresba[next] = sbba + (i * SI_CORE_SIZE); + + /* keep and reuse the initial register mapping */ + if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && + (sii->common_info->coresba[next] == sba)) { + SI_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next)); + sii->common_info->regs[next] = regs; + } + + /* change core to 'next' and read its coreid */ + sii->curmap = _sb_setcoreidx(sii, next); + sii->curidx = next; + + sii->common_info->coreid[next] = sb_coreid(&sii->pub); + + /* core specific processing... */ + /* chipc provides # cores */ + if (sii->common_info->coreid[next] == CC_CORE_ID) { + chipcregs_t *cc = (chipcregs_t *)sii->curmap; + uint32 ccrev = sb_corerev(&sii->pub); + + /* determine numcores - this is the total # cores in the chip */ + if (((ccrev == 4) || (ccrev >= 6))) + numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> + CID_CC_SHIFT; + else { + /* Older chips */ + uint chip = sii->pub.chip; + + if (chip == BCM4306_CHIP_ID) /* < 4306c0 */ + numcores = 6; + else if (chip == BCM4704_CHIP_ID) + numcores = 9; + else if (chip == BCM5365_CHIP_ID) + numcores = 7; + else { + SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", + chip)); + ASSERT(0); + numcores = 1; + } + } + SI_MSG(("_sb_scan: there are %u cores in the chip %s\n", numcores, + sii->pub.issim ? 
"QT" : "")); + } + /* scan bridged SB(s) and add results to the end of the list */ + else if (sii->common_info->coreid[next] == OCP_CORE_ID) { + sbconfig_t *sb = REGS2SB(sii->curmap); + uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1); + uint nsbcc; + + sii->numcores = next + 1; + + if ((nsbba & 0xfff00000) != SI_ENUM_BASE) + continue; + nsbba &= 0xfffff000; + if (_sb_coreidx(sii, nsbba) != BADIDX) + continue; + + nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16; + nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc); + if (sbba == SI_ENUM_BASE) + numcores -= nsbcc; + ncc += nsbcc; + } + } + + SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba)); + + sii->numcores = i + ncc; + return sii->numcores; +} + +/* scan the sb enumerated space to identify all cores */ +void +sb_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii; + uint32 origsba; + + sii = SI_INFO(sih); + + /* Save the current core info and validate it later till we know + * for sure what is good and what is bad. + */ + origsba = _sb_coresba(sii); + + /* scan all SB(s) starting from SI_ENUM_BASE */ + sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +sb_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. + */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + sii->curmap = _sb_setcoreidx(sii, coreidx); + sii->curidx = coreidx; + + return (sii->curmap); +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. 
+ */ +static void * +_sb_setcoreidx(si_info_t *sii, uint coreidx) +{ + uint32 sbaddr = sii->common_info->coresba[coreidx]; + void *regs; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: + /* map new one */ + if (!sii->common_info->regs[coreidx]) { + sii->common_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->common_info->regs[coreidx])); + } + regs = sii->common_info->regs[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr); + regs = sii->curmap; + break; + + case PCMCIA_BUS: { + uint8 tmp = (sbaddr >> 12) & 0x0f; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + tmp = (sbaddr >> 16) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + tmp = (sbaddr >> 24) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + regs = sii->curmap; + break; + } + case SPI_BUS: + case SDIO_BUS: + /* map new one */ + if (!sii->common_info->regs[coreidx]) { + sii->common_info->regs[coreidx] = (void *)(uintptr)sbaddr; + ASSERT(GOODREGS(sii->common_info->regs[coreidx])); + } + regs = sii->common_info->regs[coreidx]; + break; + + + default: + ASSERT(0); + regs = NULL; + break; + } + + return regs; +} + +/* Return the address of sbadmatch0/1/2/3 register */ +static volatile uint32 * +sb_admatch(si_info_t *sii, uint asidx) +{ + sbconfig_t *sb; + volatile uint32 *addrm; + + sb = REGS2SB(sii->curmap); + + switch (asidx) { + case 0: + addrm = &sb->sbadmatch0; + break; + + case 1: + addrm = &sb->sbadmatch1; + break; + + case 2: + addrm = &sb->sbadmatch2; + break; + + case 3: + addrm = &sb->sbadmatch3; + break; + + default: + SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx)); + return 0; + } + + return (addrm); +} + +/* Return the number of address spaces in current core */ +int +sb_numaddrspaces(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + /* + 1 because of enumeration space */ + return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1; +} + +/* Return the address of the nth address space in the current core */ +uint32 +sb_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* Return the size of the nth address space in the current core */ +uint32 +sb_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + + +/* do buffered registers update */ +void +sb_commit(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + + sii = SI_INFO(sih); + + origidx = sii->curidx; + ASSERT(GOODIDX(origidx)); + + INTR_OFF(sii, intr_val); + + /* switch over to chipcommon core if there is one, else use pci */ + if (sii->pub.ccrev != NOREV) { + chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + + /* do the buffer registers update */ + W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT); + W_REG(sii->osh, &ccregs->broadcastdata, 0x0); + } else + ASSERT(0); + + /* restore core index */ + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} + +void +sb_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* if core is already in reset, just return */ + if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET) + return; + + /* if clocks are 
not enabled, put into reset and return */ + if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0) + goto disable; + + /* set target reject and spin until busy is clear (preserve core-specific bits) */ + OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000); + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY) + SI_ERROR(("%s: target state still busy\n", __FUNCTION__)); + + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) { + OR_SBREG(sii, &sb->sbimstate, SBIM_RJ); + dummy = R_SBREG(sii, &sb->sbimstate); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000); + } + + /* set reset and reject while enabling the clocks */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_REJ | SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(10); + + /* don't forget to clear the initiator reject bit */ + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) + AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ); + +disable: + /* leave reset and reject asserted */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET)); + OSL_DELAY(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void +sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + sbconfig_t *sb; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* + * Must do the disable sequence first to work for arbitrary current core state. + */ + sb_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. 
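+ * (Assert reset with the clock forced on, clear any pending SERR/timeout
+ * state, release reset with the clock still forced, then drop the force bit
+ * so the core is left running with only the clock enabled.)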
+ */ + + /* set reset while enabling the clock and forcing them on throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) { + W_SBREG(sii, &sb->sbtmstatehigh, 0); + } + if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) { + AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO)); + } + + /* clear reset and allow it to propagate throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + + /* leave clock enabled */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); +} + +void +sb_core_tofixup(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + if ((BUSTYPE(sii->pub.bustype) != PCI_BUS) || PCIE(sii) || + (PCI(sii) && (sii->pub.buscorerev >= 5))) + return; + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + SET_SBREG(sii, &sb->sbimconfiglow, + SBIMCL_RTO_MASK | SBIMCL_STO_MASK, + (0x5 << SBIMCL_RTO_SHIFT) | 0x3); + } else { + if (sb_coreid(sih) == PCI_CORE_ID) { + SET_SBREG(sii, &sb->sbimconfiglow, + SBIMCL_RTO_MASK | SBIMCL_STO_MASK, + (0x3 << SBIMCL_RTO_SHIFT) | 0x2); + } else { + SET_SBREG(sii, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0); + } + } + + sb_commit(sih); +} + +/* + * Set the initiator timeout for the "master core". + * The master core is defined to be the core in control + * of the chip and so it issues accesses to non-memory + * locations (Because of dma *any* core can access memeory). + * + * The routine uses the bus to decide who is the master: + * SI_BUS => mips + * JTAG_BUS => chipc + * PCI_BUS => pci or pcie + * PCMCIA_BUS => pcmcia + * SDIO_BUS => pcmcia + * + * This routine exists so callers can disable initiator + * timeouts so accesses to very slow devices like otp + * won't cause an abort. The routine allows arbitrary + * settings of the service and request timeouts, though. + * + * Returns the timeout state before changing it or -1 + * on error. 
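+ * An illustrative call pattern around a slow OTP access (assuming a value
+ * of 0 here disables the RTO/STO timeouts):
+ *
+ *   prev = sb_set_initiator_to(sih, 0, BADIDX);   .. BADIDX = pick master core
+ *   ... access the slow device ...
+ *   sb_set_initiator_to(sih, prev, BADIDX);       .. restore previous timeouts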
+ */ + +#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK) + +uint32 +sb_set_initiator_to(si_t *sih, uint32 to, uint idx) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + uint32 tmp, ret = 0xffffffff; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + if ((to & ~TO_MASK) != 0) + return ret; + + /* Figure out the master core */ + if (idx == BADIDX) { + switch (BUSTYPE(sii->pub.bustype)) { + case PCI_BUS: + idx = sii->pub.buscoreidx; + break; + case JTAG_BUS: + idx = SI_CC_IDX; + break; + case PCMCIA_BUS: + case SDIO_BUS: + idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0); + break; + case SI_BUS: + idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0); + break; + default: + ASSERT(0); + } + if (idx == BADIDX) + return ret; + } + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + sb = REGS2SB(sb_setcoreidx(sih, idx)); + + tmp = R_SBREG(sii, &sb->sbimconfiglow); + ret = tmp & TO_MASK; + W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to); + + sb_commit(sih); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret; +} + +uint32 +sb_base(uint32 admatch) +{ + uint32 base; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + base = 0; + + if (type == 0) { + base = admatch & SBAM_BASE0_MASK; + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE1_MASK; + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE2_MASK; + } + + return (base); +} + +uint32 +sb_size(uint32 admatch) +{ + uint32 size; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + size = 0; + + if (type == 0) { + size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1); + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1); + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1); + } + + return (size); +} diff --git a/drivers/net/wireless/bcm4329/siutils.c b/drivers/net/wireless/bcm4329/siutils.c new file mode 100644 index 000000000000..1814db0f9dd6 --- /dev/null +++ b/drivers/net/wireless/bcm4329/siutils.c @@ -0,0 +1,1527 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: siutils.c,v 1.662.4.4.4.16.4.28 2010/06/23 21:37:54 Exp $ + */ + +#include <typedefs.h> +#include <bcmdefs.h> +#include <osl.h> +#include <bcmutils.h> +#include <siutils.h> +#include <bcmdevs.h> +#include <hndsoc.h> +#include <sbchipc.h> +#include <pcicfg.h> +#include <sbpcmcia.h> +#include <sbsocram.h> +#include <bcmsdh.h> +#include <sdio.h> +#include <sbsdio.h> +#include <sbhnddma.h> +#include <sbsdpcmdev.h> +#include <bcmsdpcm.h> +#include <hndpmu.h> + +#include "siutils_priv.h" + +/* local prototypes */ +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz); +static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); +static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs); + + +/* global variable to indicate reservation/release of gpio's */ +static uint32 si_gpioreservation = 0; +static void *common_info_alloced = NULL; + +/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ + +/* + * Allocate a si handle. + * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers + * bustype - pci/pcmcia/sb/sdio/etc + * vars - pointer to a pointer area for "environment" variables + * varsz - pointer to int to return the size of the vars + */ +si_t * +si_attach(uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + si_info_t *sii; + + /* alloc si_info_t */ + if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + return (NULL); + } + + if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { + if (NULL != sii->common_info) + MFREE(osh, sii->common_info, sizeof(si_common_info_t)); + MFREE(osh, sii, sizeof(si_info_t)); + return (NULL); + } + sii->vars = vars ? *vars : NULL; + sii->varsz = varsz ? *varsz : 0; + + return (si_t *)sii; +} + +/* global kernel resource */ +static si_info_t ksii; + +static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ + +/* generic kernel variant of si_attach() */ +si_t * +si_kattach(osl_t *osh) +{ + static bool ksii_attached = FALSE; + + if (!ksii_attached) { + void *regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + + if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs, + SI_BUS, NULL, + osh != SI_OSH ? &ksii.vars : NULL, + osh != SI_OSH ? &ksii.varsz : NULL) == NULL) { + if (NULL != ksii.common_info) + MFREE(osh, ksii.common_info, sizeof(si_common_info_t)); + SI_ERROR(("si_kattach: si_doattach failed\n")); + REG_UNMAP(regs); + return NULL; + } + REG_UNMAP(regs); + + /* save ticks normalized to ms for si_watchdog_ms() */ + if (PMUCTL_ENAB(&ksii.pub)) { + /* based on 32KHz ILP clock */ + wd_msticks = 32; + } else { + wd_msticks = ALP_CLOCK / 1000; + } + + ksii_attached = TRUE; + SI_MSG(("si_kattach done. 
ccrev = %d, wd_msticks = %d\n", + ksii.pub.ccrev, wd_msticks)); + } + + return &ksii.pub; +} + + +static bool +si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) +{ + /* need to set memseg flag for CF card first before any sb registers access */ + if (BUSTYPE(bustype) == PCMCIA_BUS) + sii->memseg = TRUE; + + + if (BUSTYPE(bustype) == SDIO_BUS) { + int err; + uint8 clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (!err) { + uint8 clkval; + + /* If register supported, wait for ALPAvail and then force ALP */ + clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL); + if ((clkval & ~SBSDIO_AVBITS) == clkset) { + SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { + SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", + clkval)); + return FALSE; + } + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + clkset, &err); + OSL_DELAY(65); + } + } + + /* Also, disable the extra SDIO pull-ups */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + } + + + return TRUE; +} + +static bool +si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs) +{ + bool pci, pcie; + uint i; + uint pciidx, pcieidx, pcirev, pcierev; + + cc = si_setcoreidx(&sii->pub, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + sii->pub.ccrev = (int)si_corerev(&sii->pub); + + /* get chipcommon chipstatus */ + if (sii->pub.ccrev >= 11) + sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); + + /* get chipcommon capabilites */ + sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); + + /* get pmu rev and caps */ + if (sii->pub.cccaps & CC_CAP_PMU) { + sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); + sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; + } + + SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", + sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, + sii->pub.pmucaps)); + + /* figure out bus/orignal core idx */ + sii->pub.buscoretype = NODEV_CORE_ID; + sii->pub.buscorerev = NOREV; + sii->pub.buscoreidx = BADIDX; + + pci = pcie = FALSE; + pcirev = pcierev = NOREV; + pciidx = pcieidx = BADIDX; + + for (i = 0; i < sii->numcores; i++) { + uint cid, crev; + + si_setcoreidx(&sii->pub, i); + cid = si_coreid(&sii->pub); + crev = si_corerev(&sii->pub); + + /* Display cores found */ + SI_MSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", + i, cid, crev, sii->common_info->coresba[i], sii->common_info->regs[i])); + + if (BUSTYPE(bustype) == PCI_BUS) { + if (cid == PCI_CORE_ID) { + pciidx = i; + pcirev = crev; + pci = TRUE; + } else if (cid == PCIE_CORE_ID) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + } + } else if ((BUSTYPE(bustype) == PCMCIA_BUS) && + (cid == PCMCIA_CORE_ID)) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } + else if (((BUSTYPE(bustype) == SDIO_BUS) || + (BUSTYPE(bustype) == SPI_BUS)) && + ((cid == PCMCIA_CORE_ID) || + (cid == SDIOD_CORE_ID))) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } + + /* find the core idx before entering this func. 
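+	 * savewin is the PCI BAR0 window saved in si_doattach() and regs is the
+	 * register va the caller attached with; whichever one matches a scanned
+	 * core gives the original core index to restore before returning.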
*/ + if ((savewin && (savewin == sii->common_info->coresba[i])) || + (regs == sii->common_info->regs[i])) + *origidx = i; + } + + + SI_MSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, + sii->pub.buscorerev)); + + if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) && + (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (sii->pub.chiprev <= 3)) + OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL); + + + /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was + * already running. + */ + if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) { + if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) || + si_setcore(&sii->pub, ARMCM3_CORE_ID, 0)) + si_core_disable(&sii->pub, 0); + } + + /* return to the original core */ + si_setcoreidx(&sii->pub, *origidx); + + return TRUE; +} + + + +static si_info_t * +si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + struct si_pub *sih = &sii->pub; + uint32 w, savewin; + chipcregs_t *cc; + char *pvars = NULL; + uint origidx; + + ASSERT(GOODREGS(regs)); + + bzero((uchar*)sii, sizeof(si_info_t)); + + + { + if (NULL == (common_info_alloced = (void *)MALLOC(osh, sizeof(si_common_info_t)))) { + SI_ERROR(("si_doattach: malloc failed! malloced %dbytes\n", MALLOCED(osh))); + return (NULL); + } + bzero((uchar*)(common_info_alloced), sizeof(si_common_info_t)); + } + sii->common_info = (si_common_info_t *)common_info_alloced; + sii->common_info->attach_count++; + + savewin = 0; + + sih->buscoreidx = BADIDX; + + sii->curmap = regs; + sii->sdh = sdh; + sii->osh = osh; + + + /* find Chipcommon address */ + if (bustype == PCI_BUS) { + savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) + savewin = SI_ENUM_BASE; + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE); + cc = (chipcregs_t *)regs; + } else + if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { + cc = (chipcregs_t *)sii->curmap; + } else { + cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + } + + sih->bustype = bustype; + if (bustype != BUSTYPE(bustype)) { + SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", + bustype, BUSTYPE(bustype))); + return NULL; + } + + /* bus/core/clk setup for register access */ + if (!si_buscore_prep(sii, bustype, devid, sdh)) { + SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); + return NULL; + } + + /* ChipID recognition. + * We assume we can read chipid at offset 0 from the regs arg. + * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), + * some way of recognizing them needs to be added here. 
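+ * The chipid word packs several fields: CID_ID_MASK selects the chip number
+ * (e.g. BCM4329_CHIP_ID for the chip this driver targets), CID_REV_MASK the
+ * chip revision, CID_PKG_MASK the package option and CID_TYPE_MASK the
+ * interconnect type (SOCI_SB vs SOCI_AI) used to pick the scan routine below.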
+ */ + w = R_REG(osh, &cc->chipid); + sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + /* Might as well fill in chip id, rev & pkg */ + sih->chip = w & CID_ID_MASK; + sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; + sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; + if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chippkg != BCM4329_289PIN_PKG_ID)) + sih->chippkg = BCM4329_182PIN_PKG_ID; + sih->issim = IS_SIM(sih->chippkg); + + /* scan for cores */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) { + SI_MSG(("Found chip type SB (0x%08x)\n", w)); + sb_scan(&sii->pub, regs, devid); + } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) { + SI_MSG(("Found chip type AI (0x%08x)\n", w)); + /* pass chipc address instead of original core base */ + ai_scan(&sii->pub, (void *)cc, devid); + } else { + SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); + return NULL; + } + /* no cores found, bail out */ + if (sii->numcores == 0) { + SI_ERROR(("si_doattach: could not find any cores\n")); + return NULL; + } + /* bus/core/clk setup */ + origidx = SI_CC_IDX; + if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { + SI_ERROR(("si_doattach: si_buscore_setup failed\n")); + return NULL; + } + + pvars = NULL; + + + + if (sii->pub.ccrev >= 20) { + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + W_REG(osh, &cc->gpiopullup, 0); + W_REG(osh, &cc->gpiopulldown, 0); + si_setcoreidx(sih, origidx); + } + + /* Skip PMU initialization from the Dongle Host. + * Firmware will take care of it when it comes up. + */ + + + + return (sii); +} + +/* may be called with core in reset */ +void +si_detach(si_t *sih) +{ + si_info_t *sii; + uint idx; + + sii = SI_INFO(sih); + + if (sii == NULL) + return; + + if (BUSTYPE(sih->bustype) == SI_BUS) + for (idx = 0; idx < SI_MAXCORES; idx++) + if (sii->common_info->regs[idx]) { + REG_UNMAP(sii->common_info->regs[idx]); + sii->common_info->regs[idx] = NULL; + } + + + if (1 == sii->common_info->attach_count--) { + MFREE(sii->osh, sii->common_info, sizeof(si_common_info_t)); + common_info_alloced = NULL; + } + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (sii != &ksii) +#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, sii, sizeof(si_info_t)); +} + +void * +si_osh(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->osh; +} + +void +si_setosh(si_t *sih, osl_t *osh) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sii->osh != NULL) { + SI_ERROR(("osh is already set....\n")); + ASSERT(!sii->osh); + } + sii->osh = osh; +} + +/* register driver interrupt disabling and restoring callback functions */ +void +si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intr_arg = intr_arg; + sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn; + sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn; + sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn; + /* save current core id. when this function is called, the current core + * must be the core which provides driver functions (il, et, wl, etc.)
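+ * This saved dev_coreid is what the INTR_OFF()/INTR_RESTORE() macros in
+ * siutils_priv.h compare against, so the callbacks only run while the
+ * driver's own core is the one currently mapped.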
+ */ + sii->dev_coreid = sii->common_info->coreid[sii->curidx]; +} + +void +si_deregister_intr_callback(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intrsoff_fn = NULL; +} + +uint +si_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + if (CHIPTYPE(sih->socitype) == SOCI_SB) { + sbconfig_t *ccsbr = (sbconfig_t *)((uintptr)((ulong) + (sii->common_info->coresba[SI_CC_IDX]) + SBCONFIGOFF)); + return R_REG(sii->osh, &ccsbr->sbflagst); + } else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return R_REG(sii->osh, ((uint32 *)(uintptr) + (sii->common_info->oob_router + OOB_STATUSA))); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_flag(sih); + else { + ASSERT(0); + return 0; + } +} + +void +si_setint(si_t *sih, int siflag) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_setint(sih, siflag); + else + ASSERT(0); +} + +uint +si_coreid(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->common_info->coreid[sii->curidx]; +} + +uint +si_coreidx(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->curidx; +} + +/* return the core-type instantiation # of the current core */ +uint +si_coreunit(si_t *sih) +{ + si_info_t *sii; + uint idx; + uint coreid; + uint coreunit; + uint i; + + sii = SI_INFO(sih); + coreunit = 0; + + idx = sii->curidx; + + ASSERT(GOODREGS(sii->curmap)); + coreid = si_coreid(sih); + + /* count the cores of our type */ + for (i = 0; i < idx; i++) + if (sii->common_info->coreid[i] == coreid) + coreunit++; + + return (coreunit); +} + +uint +si_corevendor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corevendor(sih); + else { + ASSERT(0); + return 0; + } +} + +bool +si_backplane64(si_t *sih) +{ + return ((sih->cccaps & CC_CAP_BKPLN64) != 0); +} + +uint +si_corerev(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corerev(sih); + else { + ASSERT(0); + return 0; + } +} + +/* return index of coreid or BADIDX if not found */ +uint +si_findcoreidx(si_t *sih, uint coreid, uint coreunit) +{ + si_info_t *sii; + uint found; + uint i; + + sii = SI_INFO(sih); + + found = 0; + + for (i = 0; i < sii->numcores; i++) + if (sii->common_info->coreid[i] == coreid) { + if (found == coreunit) + return (i); + found++; + } + + return (BADIDX); +} + +/* return list of found cores */ +uint +si_corelist(si_t *sih, uint coreid[]) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + bcopy((uchar*)sii->common_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); + return (sii->numcores); +} + +/* return current register mapping */ +void * +si_coreregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + + return (sii->curmap); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. 
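+ * One way to do that is with si_switch_core()/si_restore_core() below
+ * (sketch only; the d11 access in the middle is illustrative):
+ *
+ *	uint origidx, intr_val;
+ *	void *regs = si_switch_core(sih, D11_CORE_ID, &origidx, &intr_val);
+ *	... poke d11 registers through regs ...
+ *	si_restore_core(sih, origidx, intr_val);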
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +si_setcore(si_t *sih, uint coreid, uint coreunit) +{ + uint idx; + + idx = si_findcoreidx(sih, coreid, coreunit); + if (!GOODIDX(idx)) + return (NULL); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_setcoreidx(sih, idx); + else { + ASSERT(0); + return NULL; + } +} + +void * +si_setcoreidx(si_t *sih, uint coreidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_setcoreidx(sih, coreidx); + else { + ASSERT(0); + return NULL; + } +} + +/* Turn off interrupt as required by sb_setcore, before switch core */ +void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) +{ + void *cc; + si_info_t *sii; + + sii = SI_INFO(sih); + + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = si_setcore(sih, coreid, 0); + ASSERT(cc != NULL); + + return cc; +} + +/* restore coreidx and restore interrupt */ +void si_restore_core(si_t *sih, uint coreid, uint intr_val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + si_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +int +si_numaddrspaces(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_numaddrspaces(sih); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspace(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspace(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_addrspace(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspacesize(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspacesize(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_addrspacesize(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_core_cflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_cflags_wo(sih, mask, val); + else + ASSERT(0); +} + +uint32 +si_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_core_sflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +bool +si_iscoreup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_iscoreup(sih); + else { + ASSERT(0); + return FALSE; + } +} + +void +si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val) +{ + /* only for 4319, no requirement for SOCI_SB */ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + ai_write_wrap_reg(sih, offset, val); + } + else + return; + + return; +} + +uint +si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg(sih, coreidx, regoff, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corereg(sih, coreidx, 
regoff, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_disable(si_t *sih, uint32 bits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_disable(sih, bits); +} + +void +si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_reset(sih, bits, resetbits); +} + +void +si_core_tofixup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_tofixup(sih); +} + +/* Run bist on current core. Caller needs to take care of core-specific bist hazards */ +int +si_corebist(si_t *sih) +{ + uint32 cflags; + int result = 0; + + /* Read core control flags */ + cflags = si_core_cflags(sih, 0, 0); + + /* Set bist & fgc */ + si_core_cflags(sih, 0, (SICF_BIST_EN | SICF_FGC)); + + /* Wait for bist done */ + SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); + + if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) + result = BCME_ERROR; + + /* Reset core control flags */ + si_core_cflags(sih, 0xffff, cflags); + + return result; +} + +static uint32 +factor6(uint32 x) +{ + switch (x) { + case CC_F6_2: return 2; + case CC_F6_3: return 3; + case CC_F6_4: return 4; + case CC_F6_5: return 5; + case CC_F6_6: return 6; + case CC_F6_7: return 7; + default: return 0; + } +} + +/* calculate the speed the SI would run at given a set of clockcontrol values */ +uint32 +si_clock_rate(uint32 pll_type, uint32 n, uint32 m) +{ + uint32 n1, n2, clock, m1, m2, m3, mc; + + n1 = n & CN_N1_MASK; + n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; + + if (pll_type == PLL_TYPE6) { + if (m & CC_T6_MMASK) + return CC_T6_M1; + else + return CC_T6_M0; + } else if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + n1 = factor6(n1); + n2 += CC_F5_BIAS; + } else if (pll_type == PLL_TYPE2) { + n1 += CC_T2_BIAS; + n2 += CC_T2_BIAS; + ASSERT((n1 >= 2) && (n1 <= 7)); + ASSERT((n2 >= 5) && (n2 <= 23)); + } else if (pll_type == PLL_TYPE5) { + return (100000000); + } else + ASSERT(0); + /* PLL types 3 and 7 use BASE2 (25Mhz) */ + if ((pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE7)) { + clock = CC_CLOCK_BASE2 * n1 * n2; + } else + clock = CC_CLOCK_BASE1 * n1 * n2; + + if (clock == 0) + return 0; + + m1 = m & CC_M1_MASK; + m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; + m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT; + mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; + + if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + m1 = factor6(m1); + if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) + m2 += CC_F5_BIAS; + else + m2 = factor6(m2); + m3 = factor6(m3); + + switch (mc) { + case CC_MC_BYPASS: return (clock); + case CC_MC_M1: return (clock / m1); + case CC_MC_M1M2: return (clock / (m1 * m2)); + case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3)); + case CC_MC_M1M3: return (clock / (m1 * m3)); + default: return (0); + } + } else { + ASSERT(pll_type == PLL_TYPE2); + + m1 += CC_T2_BIAS; + m2 += CC_T2M2_BIAS; + m3 += CC_T2_BIAS; + ASSERT((m1 >= 2) && (m1 <= 7)); + ASSERT((m2 >= 3) && (m2 <= 10)); + ASSERT((m3 >= 2) && (m3 <= 7)); + + if ((mc & CC_T2MC_M1BYP) == 0) + clock /= m1; + if ((mc & CC_T2MC_M2BYP) == 0) + clock /= m2; + if ((mc & CC_T2MC_M3BYP) == 0) + clock /= m3; + + return (clock); + } +} + + +/* set chip watchdog reset timer to fire in 'ticks' */ +void +si_watchdog(si_t *sih, 
uint ticks) +{ + if (PMUCTL_ENAB(sih)) { + + if ((sih->chip == BCM4319_CHIP_ID) && (sih->chiprev == 0) && (ticks != 0)) { + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); + si_setcore(sih, USB20D_CORE_ID, 0); + si_core_disable(sih, 1); + si_setcore(sih, CC_CORE_ID, 0); + } + + if (ticks == 1) + ticks = 2; + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks); + } else { + /* instant NMI */ + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); + } +} + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) +/* trigger watchdog reset after ms milliseconds */ +void +si_watchdog_ms(si_t *sih, uint32 ms) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + si_watchdog(sih, wd_msticks * ms); +} +#endif + + + +/* initialize the sdio core */ +void +si_sdio_init(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + if (((sih->buscoretype == PCMCIA_CORE_ID) && (sih->buscorerev >= 8)) || + (sih->buscoretype == SDIOD_CORE_ID)) { + uint idx; + sdpcmd_regs_t *sdpregs; + + /* get the current core index */ + idx = sii->curidx; + ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0)); + + /* switch to sdio core */ + if (!(sdpregs = (sdpcmd_regs_t *)si_setcore(sih, PCMCIA_CORE_ID, 0))) + sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0); + ASSERT(sdpregs); + + SI_MSG(("si_sdio_init: For PCMCIA/SDIO Corerev %d, enable ints from core %d " + "through SD core %d (%p)\n", + sih->buscorerev, idx, sii->curidx, sdpregs)); + + /* enable backplane error and core interrupts */ + W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT); + W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx))); + + /* switch back to previous core */ + si_setcoreidx(sih, idx); + } + + /* enable interrupts */ + bcmsdh_intr_enable(sii->sdh); + +} + + +/* change logical "focus" to the gpio core for optimized access */ +void * +si_gpiosetcore(si_t *sih) +{ + return (si_setcoreidx(sih, SI_CC_IDX)); +} + +/* mask&set gpiocontrol bits */ +uint32 +si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiocontrol); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio output enable bits */ +uint32 +si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioouten); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio output bits */ +uint32 +si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioout); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* reserve one gpio */ +uint32 +si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + /* only cores on SI_BUS share GPIOs and only application users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return -1; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return -1; + } + + /* already reserved */ + if (si_gpioreservation & gpio_bitmask) + return -1; + /* set reservation */ + si_gpioreservation |= gpio_bitmask; + + return si_gpioreservation; +} + +/* release one gpio */ +/* + * releasing the gpio doesn't change the current value on the GPIO; the last write value + * persists till someone overwrites it + */ + +uint32 +si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + /* only cores on SI_BUS share GPIOs and only application users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return -1; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return -1; + } + + /* already released */ + if (!(si_gpioreservation & gpio_bitmask)) + return -1; + + /* clear reservation */ + si_gpioreservation &= ~gpio_bitmask; + + return si_gpioreservation; +} + +/* return the current gpioin register value */ +uint32 +si_gpioin(si_t *sih) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + regoff = OFFSETOF(chipcregs_t, gpioin); + return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0)); +} + +/* mask&set gpio interrupt polarity bits */ +uint32 +si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointpolarity); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio interrupt mask bits */ +uint32 +si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ?
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* assign the gpio to an led */ +uint32 +si_gpioled(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sih->ccrev < 16) + return -1; + + /* gpio led powersave reg */ + return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val)); +} + +/* mask&set gpio timer val */ +uint32 +si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (sih->ccrev < 16) + return -1; + + return (si_corereg(sih, SI_CC_IDX, + OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval)); +} + +uint32 +si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 20) + return -1; + + offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return -1; + + if (regtype == GPIO_REGEVT) + offs = OFFSETOF(chipcregs_t, gpioevent); + else if (regtype == GPIO_REGEVT_INTMSK) + offs = OFFSETOF(chipcregs_t, gpioeventintmask); + else if (regtype == GPIO_REGEVT_INTPOL) + offs = OFFSETOF(chipcregs_t, gpioeventintpolarity); + else + return -1; + + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +void * +si_gpio_handler_register(si_t *sih, uint32 event, + bool level, gpio_handler_t cb, void *arg) +{ + si_info_t *sii; + gpioh_item_t *gi; + + ASSERT(event); + ASSERT(cb != NULL); + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return NULL; + + if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL) + return NULL; + + bzero(gi, sizeof(gpioh_item_t)); + gi->event = event; + gi->handler = cb; + gi->arg = arg; + gi->level = level; + + gi->next = sii->gpioh_head; + sii->gpioh_head = gi; + + return (void *)(gi); +} + +void +si_gpio_handler_unregister(si_t *sih, void *gpioh) +{ + si_info_t *sii; + gpioh_item_t *p, *n; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return; + + ASSERT(sii->gpioh_head != NULL); + if ((void*)sii->gpioh_head == gpioh) { + sii->gpioh_head = sii->gpioh_head->next; + MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); + return; + } else { + p = sii->gpioh_head; + n = p->next; + while (n) { + if ((void*)n == gpioh) { + p->next = n->next; + MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); + return; + } + p = n; + n = n->next; + } + } + + ASSERT(0); /* Not found in list */ +} + +void +si_gpio_handler_process(si_t *sih) +{ + si_info_t *sii; + gpioh_item_t *h; + uint32 status; + uint32 level = si_gpioin(sih); + uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0); + + sii = SI_INFO(sih); + for (h = sii->gpioh_head; h != NULL; h = h->next) { + if (h->handler) { + status = (h->level ? level : edge); + + if (status & h->event) + h->handler(status, h->arg); + } + } + + si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */ +} + +uint32 +si_gpio_int_enable(si_t *sih, bool enable) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return -1; + + offs = OFFSETOF(chipcregs_t, intmask); + return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? 
CI_GPIO : 0))); +} + + +/* Return the RAM size of the SOCRAM core */ +uint32 +si_socram_size(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + uint memsize = 0; + + sii = SI_INFO(sih); + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev == 0) + memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK)); + else if (corerev < 3) { + memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK)); + memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } else { + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + uint bsz = (coreinfo & SRCI_SRBSZ_MASK); + uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; + if (lss != 0) + nb --; + memsize = nb * (1 << (bsz + SR_BSZ_BASE)); + if (lss != 0) + memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + + +void +si_btcgpiowar(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + chipcregs_t *cc; + + sii = SI_INFO(sih); + + /* Make sure that there is ChipCommon core present && + * UART_TX is strapped to 1 + */ + if (!(sih->cccaps & CC_CAP_UARTGPIO)) + return; + + /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */ + INTR_OFF(sii, intr_val); + + origidx = si_coreidx(sih); + + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04); + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +/* check if the device is removed */ +bool +si_deviceremoved(si_t *sih) +{ + uint32 w; + si_info_t *sii; + + sii = SI_INFO(sih); + + switch (BUSTYPE(sih->bustype)) { + case PCI_BUS: + ASSERT(sii->osh != NULL); + w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32)); + if ((w & 0xFFFF) != VENDOR_BROADCOM) + return TRUE; + else + return FALSE; + default: + return FALSE; + } + return FALSE; +} diff --git a/drivers/net/wireless/bcm4329/siutils_priv.h b/drivers/net/wireless/bcm4329/siutils_priv.h new file mode 100644 index 000000000000..e8ad7e50958a --- /dev/null +++ b/drivers/net/wireless/bcm4329/siutils_priv.h @@ -0,0 +1,213 @@ +/* + * Include file private to the SOC Interconnect support files. + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: siutils_priv.h,v 1.3.10.5.4.2 2009/09/22 13:28:16 Exp $ + */ + +#ifndef _siutils_priv_h_ +#define _siutils_priv_h_ + +/* debug/trace */ +#define SI_ERROR(args) + +#define SI_MSG(args) + +#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) + +typedef uint32 (*si_intrsoff_t)(void *intr_arg); +typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); +typedef bool (*si_intrsenabled_t)(void *intr_arg); + +typedef struct gpioh_item { + void *arg; + bool level; + gpio_handler_t handler; + uint32 event; + struct gpioh_item *next; +} gpioh_item_t; + +/* misc si info needed by some of the routines */ +typedef struct si_common_info { + void *regs[SI_MAXCORES]; /* other regs va */ + void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ + uint coreid[SI_MAXCORES]; /* id of each core */ + uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /* erom cib entry for each core */ + uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /* second address space size */ + uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ + uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ + void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + uint32 oob_router; /* oob router registers for axi */ + uint8 attach_count; +} si_common_info_t; + +typedef struct si_info { + struct si_pub pub; /* backplane public state (must be first field) */ + + void *osh; /* osl os handle */ + void *sdh; /* bcmsdh handle */ + void *pch; /* PCI/E core handle */ + uint dev_coreid; /* the core that provides driver functions */ + void *intr_arg; /* interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ + + + gpioh_item_t *gpioh_head; /* GPIO event handlers list */ + + bool memseg; /* flag to toggle MEM_SEG register */ + + char *vars; + uint varsz; + + void *curmap; /* current regs va */ + + uint curidx; /* current core index */ + uint numcores; /* # discovered cores */ + void *curwrap; /* current wrapper va */ + si_common_info_t *common_info; /* Common information for all the cores in a chip */ +} si_info_t; + +#define SI_INFO(sih) (si_info_t *)(uintptr)sih + +#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ + ISALIGNED((x), SI_CORE_SIZE)) +#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) +#define BADCOREADDR 0 +#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) +#define BADIDX (SI_MAXCORES + 1) +#define NOREV -1 /* Invalid rev */ + +#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCI_CORE_ID)) +#define PCIE(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE_CORE_ID)) +#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) + +/*
Newer chips can access PCI/PCIE and CC core without requiring a change to + * PCI BAR0 WIN + */ +#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \ + (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13)) + +#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) +#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) + +/* + * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/ + * after core switching to avoid invalid register access inside ISR. + */ +#define INTR_OFF(si, intr_val) \ + if ((si)->intrsoff_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) { \ + intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } +#define INTR_RESTORE(si, intr_val) \ + if ((si)->intrsrestore_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) {\ + (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } + +/* dynamic clock control defines */ +#define LPOMINFREQ 25000 /* low power oscillator min */ +#define LPOMAXFREQ 43000 /* low power oscillator max */ +#define XTALMINFREQ 19800000 /* 20 MHz - 1% */ +#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */ +#define PCIMINFREQ 25000000 /* 25 MHz */ +#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */ + +#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */ +#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */ + +#define PCI_FORCEHT(si) \ + (((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \ + ((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID))) + +/* GPIO Based LED powersave defines */ +#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ +#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */ + +#ifndef DEFAULT_GPIOTIMERVAL +#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) +#endif + +/* Silicon Backplane externs */ +extern void sb_scan(si_t *sih, void *regs, uint devid); +extern uint sb_coreid(si_t *sih); +extern uint sb_flag(si_t *sih); +extern void sb_setint(si_t *sih, int siflag); +extern uint sb_corevendor(si_t *sih); +extern uint sb_corerev(si_t *sih); +extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern bool sb_iscoreup(si_t *sih); +extern void *sb_setcoreidx(si_t *sih, uint coreidx); +extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_commit(si_t *sih); +extern uint32 sb_base(uint32 admatch); +extern uint32 sb_size(uint32 admatch); +extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void sb_core_tofixup(si_t *sih); +extern void sb_core_disable(si_t *sih, uint32 bits); +extern uint32 sb_addrspace(si_t *sih, uint asidx); +extern uint32 sb_addrspacesize(si_t *sih, uint asidx); +extern int sb_numaddrspaces(si_t *sih); + +extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx); + + + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool sb_pci_pmecap(si_t *sih); +struct osl_info; +extern bool sb_pci_fastpmecap(struct osl_info *osh); +extern bool sb_pci_pmeclr(si_t *sih); +extern void sb_pci_pmeen(si_t *sih); +extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset); + +/* AMBA Interconnect exported externs */ +extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *ai_kattach(osl_t *osh); +extern void ai_scan(si_t *sih, void *regs, uint devid); + +extern uint
ai_flag(si_t *sih); +extern void ai_setint(si_t *sih, int siflag); +extern uint ai_coreidx(si_t *sih); +extern uint ai_corevendor(si_t *sih); +extern uint ai_corerev(si_t *sih); +extern bool ai_iscoreup(si_t *sih); +extern void *ai_setcoreidx(si_t *sih, uint coreidx); +extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void ai_core_disable(si_t *sih, uint32 bits); +extern int ai_numaddrspaces(si_t *sih); +extern uint32 ai_addrspace(si_t *sih, uint asidx); +extern uint32 ai_addrspacesize(si_t *sih, uint asidx); +extern void ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val); + + +#endif /* _siutils_priv_h_ */ diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c new file mode 100644 index 000000000000..2cd629cbd1c3 --- /dev/null +++ b/drivers/net/wireless/bcm4329/wl_iw.c @@ -0,0 +1,7875 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.13 2010/09/15 03:34:56 Exp $ + */ + + +#include <typedefs.h> +#include <linuxver.h> +#include <osl.h> + +#include <bcmutils.h> +#include <bcmendian.h> +#include <proto/ethernet.h> + +#include <linux/if_arp.h> +#include <asm/uaccess.h> + +#include <dngl_stats.h> +#include <dhd.h> +#include <dhdioctl.h> + +typedef void wlc_info_t; +typedef void wl_info_t; +typedef const struct si_pub si_t; +#include <wlioctl.h> + +#include <proto/ethernet.h> +#include <dngl_stats.h> +#include <dhd.h> +#define WL_ERROR(x) printf x +#define WL_TRACE(x) +#define WL_ASSOC(x) +#define WL_INFORM(x) +#define WL_WSEC(x) +#define WL_SCAN(x) + +#include <wl_iw.h> + + + +#ifndef IW_ENCODE_ALG_SM4 +#define IW_ENCODE_ALG_SM4 0x20 +#endif + +#ifndef IW_AUTH_WAPI_ENABLED +#define IW_AUTH_WAPI_ENABLED 0x20 +#endif + +#ifndef IW_AUTH_WAPI_VERSION_1 +#define IW_AUTH_WAPI_VERSION_1 0x00000008 +#endif + +#ifndef IW_AUTH_CIPHER_SMS4 +#define IW_AUTH_CIPHER_SMS4 0x00000020 +#endif + +#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK +#define IW_AUTH_KEY_MGMT_WAPI_PSK 4 +#endif + +#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT +#define IW_AUTH_KEY_MGMT_WAPI_CERT 8 +#endif + + +#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED)) + +#include <linux/rtnetlink.h> +#include <linux/mutex.h> + +#define WL_IW_USE_ISCAN 1 +#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS 1 + +#if defined(SOFTAP) +#define WL_SOFTAP(x) printk x +static struct net_device *priv_dev; +static bool ap_cfg_running = FALSE; +static bool ap_fw_loaded = FALSE; +struct net_device *ap_net_dev = NULL; +struct semaphore ap_eth_sema; +static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap); +static int wl_iw_softap_deassoc_stations(struct net_device *dev); +#endif + +#define WL_IW_IOCTL_CALL(func_call) \ + do { \ + func_call; \ + } while (0) + +static int g_onoff = G_WLAN_SET_ON; +wl_iw_extra_params_t g_wl_iw_params; +static struct mutex wl_start_lock; +static struct mutex wl_cache_lock; + +extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, + uint32 reason, char* stringBuf, uint buflen); +#include <bcmsdbus.h> +extern void dhd_customer_gpio_wlan_ctrl(int onoff); +extern uint dhd_dev_reset(struct net_device *dev, uint8 flag); +extern void dhd_dev_init_ioctl(struct net_device *dev); +int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val); + +uint wl_msg_level = WL_ERROR_VAL; + +#define MAX_WLIW_IOCTL_LEN 1024 + + +#if defined(IL_BIGENDIAN) +#include <bcmendian.h> +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i +#endif + +#ifdef CONFIG_WIRELESS_EXT + +extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +extern int dhd_wait_pend8021x(struct net_device *dev); +#endif + +#if WIRELESS_EXT < 19 +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) +#endif + +static void *g_scan = NULL; +static volatile uint g_scan_specified_ssid; +static wlc_ssid_t g_specific_ssid; + +static wlc_ssid_t g_ssid; + +static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl; +static volatile uint g_first_broadcast_scan; +static volatile uint g_first_counter_scans; +#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3 + + +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2, 6, 0)) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif + +#if defined(WL_IW_USE_ISCAN) +#if !defined(CSCAN) +static void wl_iw_free_ss_cache(void); +static int wl_iw_run_ss_cache_timer(int kick_off); +#endif +int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len); +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 + +#define WLC_IW_ISCAN_MAXLEN 2048 +typedef struct iscan_buf { + struct iscan_buf * next; + char iscan_buf[WLC_IW_ISCAN_MAXLEN]; +} iscan_buf_t; + +typedef struct iscan_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + int iscan_state; + iscan_buf_t * list_hdr; + iscan_buf_t * list_cur; + + + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; + + uint32 scan_flag; +#if defined CSCAN + char ioctlbuf[WLC_IOCTL_MEDLEN]; +#else + char ioctlbuf[WLC_IOCTL_SMLEN]; +#endif + wl_iscan_params_t *iscan_ex_params_p; + int iscan_ex_param_size; +} iscan_info_t; +#define COEX_DHCP 1 +static void wl_iw_bt_flag_set(struct net_device *dev, bool set); +static void wl_iw_bt_release(void); + +typedef enum bt_coex_status { + BT_DHCP_IDLE = 0, + BT_DHCP_START, + BT_DHCP_OPPORTUNITY_WINDOW, + BT_DHCP_FLAG_FORCE_TIMEOUT +} coex_status_t; +#define BT_DHCP_OPPORTUNITY_WINDOW_TIEM 2500 +#define BT_DHCP_FLAG_FORCE_TIME 5500 + +typedef struct bt_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + int bt_state; + + + long bt_pid; + struct semaphore bt_sem; + struct completion bt_exited; +} bt_info_t; + +bt_info_t *g_bt = NULL; +static void wl_iw_bt_timerfunc(ulong data); +iscan_info_t *g_iscan = NULL; +static void wl_iw_timerfunc(ulong data); +static void wl_iw_set_event_mask(struct net_device *dev); +static int +wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); +#endif +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +); + +#ifndef CSCAN +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +); + +static uint +wl_iw_get_scan_prep( + wl_scan_results_t *list, + struct iw_request_info *info, + char *extra, + short max_size +); +#endif + +static void swap_key_from_BE( + wl_wsec_key_t *key +) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE( + wl_wsec_key_t *key +) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +static int +dev_wlc_ioctl( + struct net_device *dev, + int cmd, + void *arg, + int len +) +{ + struct ifreq ifr; + wl_ioctl_t ioc; + mm_segment_t fs; + int ret = -EINVAL; + + if (!dev) { + WL_ERROR(("%s: dev is null\n", 
__FUNCTION__)); + return ret; + } + + net_os_wake_lock(dev); + + WL_INFORM(("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n", + __FUNCTION__, current->pid, cmd, arg, len)); + + if (g_onoff == G_WLAN_SET_ON) { + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + + strcpy(ifr.ifr_name, dev->name); + ifr.ifr_data = (caddr_t) &ioc; + + ret = dev_open(dev); + if (ret) { + WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret)); + net_os_wake_unlock(dev); + return ret; + } + + fs = get_fs(); + set_fs(get_ds()); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)) + ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#else + ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#endif + set_fs(fs); + } + else { + WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__)); + } + + net_os_wake_unlock(dev); + + return ret; +} + + +static int +dev_wlc_intvar_get_reg( + struct net_device *dev, + char *name, + uint reg, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + uint len; + len = bcm_mkiovar(name, (char *)(®), sizeof(reg), (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + return (error); +} + + +static int +dev_wlc_intvar_set_reg( + struct net_device *dev, + char *name, + char *addr, + char * val) +{ + char reg_addr[8]; + + memset(reg_addr, 0, sizeof(reg_addr)); + memcpy((char *)®_addr[0], (char *)addr, 4); + memcpy((char *)®_addr[4], (char *)val, 4); + + return (dev_wlc_bufvar_set(dev, name, (char *)®_addr[0], sizeof(reg_addr))); +} + + +static int +dev_wlc_intvar_set( + struct net_device *dev, + char *name, + int val) +{ + char buf[WLC_IOCTL_SMLEN]; + uint len; + + val = htod32(val); + len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); + ASSERT(len); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); +} + +#if defined(WL_IW_USE_ISCAN) +static int +dev_iw_iovar_setbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + + if (iolen == 0) + return 0; + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); +} + +static int +dev_iw_iovar_getbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + + return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); +} +#endif + + +#if WIRELESS_EXT > 17 +static int +dev_wlc_bufvar_set( + struct net_device *dev, + char *name, + char *buf, int len) +{ + static char ioctlbuf[MAX_WLIW_IOCTL_LEN]; + uint buflen; + + buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf)); + ASSERT(buflen); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen)); +} +#endif + + +static int +dev_wlc_bufvar_get( + struct net_device *dev, + char *name, + char *buf, int buflen) +{ + static char ioctlbuf[MAX_WLIW_IOCTL_LEN]; + int error; + uint len; + + len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); + if (!error) + bcopy(ioctlbuf, buf, buflen); + + return (error); +} + + + +static int +dev_wlc_intvar_get( + struct net_device *dev, + char *name, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + 
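+	/* Read an integer iovar: bcm_mkiovar() packs "name\0" (with a zero-length
+	 * parameter here) into var.buf, WLC_GET_VAR then overwrites the buffer
+	 * with the value, and dtoh32() fixes up dongle-to-host byte order.
+	 */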
+ uint len; + uint data_null; + + len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + + return (error); +} + + +#if WIRELESS_EXT > 12 +static int +wl_iw_set_active_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int as = 0; + int error = 0; + char *p = extra; + +#if defined(WL_IW_USE_ISCAN) + if (g_iscan->iscan_state == ISCAN_STATE_IDLE) +#endif + error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as)); +#if defined(WL_IW_USE_ISCAN) + else + g_iscan->scan_flag = as; +#endif + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_set_passive_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int ps = 1; + int error = 0; + char *p = extra; + +#if defined(WL_IW_USE_ISCAN) + if (g_iscan->iscan_state == ISCAN_STATE_IDLE) { +#endif + + + if (g_scan_specified_ssid == 0) { + error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps)); + } +#if defined(WL_IW_USE_ISCAN) + } + else + g_iscan->scan_flag = ps; +#endif + + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_get_macaddr( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error; + char buf[128]; + struct ether_addr *id; + char *p = extra; + + + strcpy(buf, "cur_etheraddr"); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf)); + id = (struct ether_addr *) buf; + p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + id->octet[0], id->octet[1], id->octet[2], + id->octet[3], id->octet[4], id->octet[5]); + wrqu->data.length = p - extra + 1; + + return error; +} + + +static int +wl_iw_set_country( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + char country_code[WLC_CNTRY_BUF_SZ]; + int error = 0; + char *p = extra; + int country_offset; + int country_code_size; + + WL_TRACE(("%s\n", __FUNCTION__)); + memset(country_code, 0, sizeof(country_code)); + + country_offset = strcspn(extra, " "); + country_code_size = strlen(extra) - country_offset; + + + if (country_offset != 0) { + strncpy(country_code, extra + country_offset + 1, + MIN(country_code_size, sizeof(country_code))); + + + if ((error = dev_wlc_ioctl(dev, WLC_SET_COUNTRY, + &country_code, sizeof(country_code))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set country %s OK\n", __FUNCTION__, country_code)); + goto exit; + } + } + + WL_ERROR(("%s: set country %s failed code %d\n", __FUNCTION__, country_code, error)); + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + return error; +} + +#ifdef CUSTOMER_HW2 +static int +wl_iw_set_power_mode( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + static int pm = PM_FAST; + int pm_local = PM_OFF; + char powermode_val = 0; + + strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + + WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__)); + + dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)); + dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local)); + + /* Disable 
packet filtering if necessary */ + net_os_set_packet_filter(dev, 0); + + } else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) { + + WL_TRACE(("%s: DHCP session done\n", __FUNCTION__)); + + dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); + + /* Enable packet filtering if was turned off */ + net_os_set_packet_filter(dev, 1); + + } else { + WL_ERROR(("Unkwown yet power setting, ignored\n")); + } + + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + + return error; +} +#endif + +static int +wl_iw_get_power_mode( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error; + char *p = extra; + int pm_local = PM_FAST; + + error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local)); + if (!error) { + WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local)); + if (pm_local == PM_OFF) + pm_local = 1; /* Active */ + else + pm_local = 0; /* Auto */ + p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local); + } + else { + WL_TRACE(("%s: Error = %d\n", __func__, error)); + p += snprintf(p, MAX_WX_STRING, "FAIL"); + } + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_set_btcoex_dhcp( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; +#ifndef CUSTOMER_HW2 + static int pm = PM_FAST; + int pm_local = PM_OFF; +#endif + char powermode_val = 0; + char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 }; + char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 }; + char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 }; + + uint32 regaddr; + static uint32 saved_reg66; + static uint32 saved_reg41; + static uint32 saved_reg68; + static bool saved_status = FALSE; + + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#ifndef CUSTOMER_HW2 + uint32 temp1, temp2; +#endif + +#ifdef CUSTOMER_HW2 + strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") + 1, 1); +#else + strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1); +#endif + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + + WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__)); + + if ((saved_status == FALSE) && +#ifndef CUSTOMER_HW2 + (!dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))) && +#endif + (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) { + saved_status = TRUE; + WL_TRACE(("Saved 0x%x 0x%x 0x%x\n", \ + saved_reg66, saved_reg41, saved_reg68)); + +#ifndef CUSTOMER_HW2 + dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local)); +#endif + + dev_wlc_bufvar_set(dev, "btc_params", \ + (char *)&buf_reg66va_dhcp_on[0], sizeof(buf_reg66va_dhcp_on)); + dev_wlc_bufvar_set(dev, "btc_params", \ + (char *)&buf_reg41va_dhcp_on[0], sizeof(buf_reg41va_dhcp_on)); + dev_wlc_bufvar_set(dev, "btc_params", \ + (char *)&buf_reg68va_dhcp_on[0], sizeof(buf_reg68va_dhcp_on)); +#ifndef CUSTOMER_HW2 + if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 12, &temp1)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 13, &temp2))) + { + if ((temp1 != 0) && (temp2 != 0)) { +#endif + g_bt->bt_state = BT_DHCP_START; + g_bt->timer_on = 1; + mod_timer(&g_bt->timer, g_bt->timer.expires); + WL_TRACE(("%s enable BT DHCP Timer\n", \ + __FUNCTION__)); +#ifndef CUSTOMER_HW2 + } + } +#endif + } + else if 
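/*
 * The private command handlers above (wl_iw_set_active_scan,
 * wl_iw_get_macaddr, wl_iw_set_country, wl_iw_set_power_mode, ...) share
 * one calling convention: the request arrives as ASCII text in 'extra',
 * the handler writes an ASCII reply ("OK", "FAIL", "Macaddr = ...") back
 * into the same buffer, and reports the reply length, including the
 * trailing NUL, through wrqu->data.length.  A minimal handler skeleton
 * following that convention (a sketch; the name and body are
 * illustrative, not part of this driver):
 */
static int
sketch_priv_cmd_handler(struct net_device *dev, struct iw_request_info *info,
                        union iwreq_data *wrqu, char *extra)
{
        char *p = extra;
        int error = 0;

        /* ... perform the requested ioctl/iovar work here ... */

        p += snprintf(p, MAX_WX_STRING, error ? "FAIL" : "OK");
        wrqu->data.length = p - extra + 1;      /* reply length counts the NUL */
        return error;
}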
(saved_status == TRUE) { + WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__)); + } + } +#ifdef CUSTOMER_HW2 + else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) { +#else + else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) { +#endif + WL_TRACE(("%s: DHCP session done\n", __FUNCTION__)); + +#ifndef CUSTOMER_HW2 + dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); +#endif + + WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__)); + if (g_bt->timer_on) { + g_bt->timer_on = 0; + del_timer_sync(&g_bt->timer); + } + + dev_wlc_bufvar_set(dev, "btc_flags", \ + (char *)&buf_flag7_default[0], sizeof(buf_flag7_default)); + + if (saved_status) { + regaddr = 66; + dev_wlc_intvar_set_reg(dev, "btc_params", \ + (char *)®addr, (char *)&saved_reg66); + regaddr = 41; + dev_wlc_intvar_set_reg(dev, "btc_params", \ + (char *)®addr, (char *)&saved_reg41); + regaddr = 68; + dev_wlc_intvar_set_reg(dev, "btc_params", \ + (char *)®addr, (char *)&saved_reg68); + } + saved_status = FALSE; + } + else { + WL_ERROR(("Unkwown yet power setting, ignored\n")); + } + + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + + return error; +} + +static int +wl_iw_set_suspend( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int suspend_flag; + int ret_now; + int ret = 0; + + suspend_flag = *(extra + strlen(SETSUSPEND_CMD) + 1) - '0'; + + if (suspend_flag != 0) + suspend_flag = 1; + + ret_now = net_os_set_suspend_disable(dev, suspend_flag); + + if (ret_now != suspend_flag) { + if (!(ret = net_os_set_suspend(dev, ret_now))) + WL_ERROR(("%s: Suspend Flag %d -> %d\n", \ + __FUNCTION__, ret_now, suspend_flag)); + else + WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + + return ret; +} + +int +wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len) +{ + int i, c; + char *p = ssid_buf; + + if (ssid_len > 32) ssid_len = 32; + + for (i = 0; i < ssid_len; i++) { + c = (int)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += sprintf(p, "\\x%02X", c); + } + } + *p = '\0'; + + return p - ssid_buf; +} + +static int +wl_iw_get_link_speed( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + static int link_speed; + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed)); + link_speed *= 500000; + } + + p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000); + + wrqu->data.length = p - extra + 1; + + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_get_dtim_skip( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + char iovbuf[32]; + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + + memset(iovbuf, 0, sizeof(iovbuf)); + strcpy(iovbuf, "bcn_li_dtim"); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR, + &iovbuf, sizeof(iovbuf))) >= 0) { + + p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]); + WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0])); + wrqu->data.length = p - extra + 1; + } + else + WL_ERROR(("%s: get dtim_skip failed code %d\n", \ + __FUNCTION__, error)); + } + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_set_dtim_skip( + struct net_device *dev, + struct iw_request_info *info, + union 
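/*
 * The eight-byte "btc_params" arrays used by wl_iw_set_btcoex_dhcp()
 * above follow the same layout that dev_wlc_intvar_set_reg() builds by
 * hand: a 32-bit register index followed by a 32-bit value, both in
 * dongle (little-endian) byte order.  So { 66, 0, 0, 0, 0x10, 0x27, 0, 0 }
 * reads "set BT-coex register 66 to 0x2710 (10000)".  A sketch that
 * assembles such a buffer instead of spelling the bytes out (helper name
 * is illustrative, not part of this driver):
 */
static void sketch_fill_btc_reg(char buf[8], uint32 reg, uint32 val)
{
        uint32 le_reg = htod32(reg);            /* host-to-dongle ordering */
        uint32 le_val = htod32(val);

        memcpy(&buf[0], &le_reg, sizeof(le_reg));   /* register index, e.g. 66     */
        memcpy(&buf[4], &le_val, sizeof(le_val));   /* value, e.g. 0x2710 == 10000 */
}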
iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + int bcn_li_dtim; + char iovbuf[32]; + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + + bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0'); + + if ((bcn_li_dtim >= 0) || ((bcn_li_dtim <= 5))) { + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR, + &iovbuf, sizeof(iovbuf))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + + net_os_set_dtim_skip(dev, bcn_li_dtim); + + WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__, \ + bcn_li_dtim)); + goto exit; + } + else WL_ERROR(("%s: set dtim_skip %d failed code %d\n", \ + __FUNCTION__, bcn_li_dtim, error)); + } + else WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n", \ + __FUNCTION__, bcn_li_dtim)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_get_band( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + static int band; + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band)); + + p += snprintf(p, MAX_WX_STRING, "Band %d", band); + + wrqu->data.length = p - extra + 1; + } + + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_set_band( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + uint band; + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_ON) { + + band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0'); + + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) { + + + if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND, + &band, sizeof(band))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band)); + goto exit; + } + else WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__, \ + band, error)); + } + else WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + +#ifdef PNO_SUPPORT + +static int +wl_iw_set_pno_reset( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + + net_os_wake_lock(dev); + if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) { + + if ((error = dhd_dev_pno_reset(dev)) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set OK\n", __FUNCTION__)); + goto exit; + } + else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + + +static int +wl_iw_set_pno_enable( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + int pfn_enabled; + + net_os_wake_lock(dev); + pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0'); + + if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) { + + if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set OK\n", 
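/*
 * wl_iw_set_dtim_skip(), wl_iw_set_band() and the other single-argument
 * commands above all read their argument the same way: the request
 * arrives as "<NAME> <d>" and the handler picks up the one digit that
 * follows the command name and the separating space.  The convention in
 * isolation (a sketch; the helper name is illustrative):
 */
static int sketch_parse_digit_arg(const char *extra, const char *cmd_name)
{
        /* "<NAME> 3" -> 3; anything outside '0'..'9' is rejected */
        char c = *(extra + strlen(cmd_name) + 1);

        return (c >= '0' && c <= '9') ? (c - '0') : -1;
}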
__FUNCTION__)); + goto exit; + } + else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + + +static int +wl_iw_set_pno_set( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int res = -1; + wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT]; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char type; + char *str_ptr; + int tlv_size_left; + int pno_time; + +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = {'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', \ + 'S', 0x01, 0x01, 0x00, + 'S', + 0x04, + 'B', 'R', 'C', 'M', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + 0x00, + 0x0A + }; +#endif + + net_os_wake_lock(dev); + WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto exit_proc; + } + + if (wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) { + WL_ERROR(("%s aggument=%d less %d\n", __FUNCTION__, \ + wrqu->data.length, strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))); + goto exit_proc; + } + +#ifdef PNO_SET_DEBUG + if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) { + res = -ENOMEM; + goto exit_proc; + } + memcpy(extra, pno_in_example, sizeof(pno_in_example)); + wrqu->data.length = sizeof(pno_in_example); + for (i = 0; i < wrqu->data.length; i++) + printf("%02X ", extra[i]); + printf("\n"); +#endif + + str_ptr = extra; +#ifdef PNO_SET_DEBUG + str_ptr += strlen("PNOSETUP "); + tlv_size_left = wrqu->data.length - strlen("PNOSETUP "); +#else + str_ptr += strlen(PNOSETUP_SET_CMD); + tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD); +#endif + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && \ + (cmd_tlv_temp->version == PNO_TLV_VERSION) && \ + (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) + { + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \ + MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { + WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } + else { + while (tlv_size_left > 0) + { + type = str_ptr[0]; + switch (type) { + case PNO_TLV_TYPE_TIME: + + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &pno_time, \ + sizeof(pno_time), \ + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + + default: + WL_ERROR(("%s get unkwown type %X\n", \ + __FUNCTION__, type)); + goto exit_proc; + break; + } + } + } + } + else { + WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time); + +exit_proc: + net_os_wake_unlock(dev); + return res; +} +#endif + +static int +wl_iw_get_rssi( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + static int rssi = 0; + static wlc_ssid_t ssid = {0}; + int error = 0; + char *p = extra; + static char ssidbuf[SSID_FMT_BUF_LEN]; + scb_val_t scb_val; + + net_os_wake_lock(dev); + + bzero(&scb_val, sizeof(scb_val_t)); + + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, 
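/*
 * The PNOSETUP payload decoded by wl_iw_set_pno_set() above is a small
 * TLV stream; the pno_in_example[] bytes spell out the expected shape:
 * the "PNOSETUP " keyword, a fixed cmd_tlv_t header, one 'S' TLV
 * (1-byte length + SSID bytes) per network, then a 'T' TLV carrying a
 * 2-byte scan period.  A sketch that assembles such a buffer for one
 * SSID -- the header bytes and the byte order of the period are inferred
 * from the example, so treat them as assumptions:
 */
static int sketch_build_pno_cmd(char *buf, int buflen, const char *ssid,
                                uint8 ssid_len, unsigned short period)
{
        char *p = buf;

        if (buflen < (int)(strlen("PNOSETUP ") + 4 + 2 + ssid_len + 1 + sizeof(period)))
                return -1;

        p += sprintf(p, "PNOSETUP ");                       /* command keyword  */
        *p++ = 'S'; *p++ = 0x01; *p++ = 0x01; *p++ = 0x00;  /* cmd_tlv_t header */
        *p++ = 'S';                                         /* SSID TLV         */
        *p++ = (char)ssid_len;
        memcpy(p, ssid, ssid_len); p += ssid_len;
        *p++ = 'T';                                         /* scan-period TLV  */
        memcpy(p, &period, sizeof(period)); p += sizeof(period);

        return p - buf;                                     /* bytes to hand over */
}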
sizeof(scb_val_t)); + if (error) { + WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error)); + net_os_wake_unlock(dev); + return error; + } + rssi = dtoh32(scb_val.val); + + error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)); + if (!error) { + ssid.SSID_len = dtoh32(ssid.SSID_len); + wl_format_ssid(ssidbuf, ssid.SSID, dtoh32(ssid.SSID_len)); + } + } + + p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi); + wrqu->data.length = p - extra + 1; + + net_os_wake_unlock(dev); + return error; +} + +int +wl_iw_send_priv_event( + struct net_device *dev, + char *flag +) +{ + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd; + + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + if (strlen(flag) > sizeof(extra)) + return -1; + + strcpy(extra, flag); + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + net_os_wake_lock_timeout_enable(dev); + WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); + + return 0; +} + + +int +wl_control_wl_start(struct net_device *dev) +{ + int ret = 0; + + WL_TRACE(("Enter %s \n", __FUNCTION__)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + mutex_lock(&wl_start_lock); + + if (g_onoff == G_WLAN_SET_OFF) { + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON); + +#if defined(BCMLXSDMMC) + sdioh_start(NULL, 0); +#endif + + dhd_dev_reset(dev, 0); + +#if defined(BCMLXSDMMC) + sdioh_start(NULL, 1); +#endif + + dhd_dev_init_ioctl(dev); + + g_onoff = G_WLAN_SET_ON; + } + WL_TRACE(("Exited %s \n", __FUNCTION__)); + + mutex_unlock(&wl_start_lock); + return ret; +} + + +static int +wl_iw_control_wl_off( + struct net_device *dev, + struct iw_request_info *info +) +{ + int ret = 0; + WL_TRACE(("Enter %s\n", __FUNCTION__)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + mutex_lock(&wl_start_lock); + +#ifdef SOFTAP + ap_cfg_running = FALSE; +#endif + + if (g_onoff == G_WLAN_SET_ON) { + g_onoff = G_WLAN_SET_OFF; +#if defined(WL_IW_USE_ISCAN) + g_iscan->iscan_state = ISCAN_STATE_IDLE; +#endif + + dhd_dev_reset(dev, 1); + +#if defined(WL_IW_USE_ISCAN) +#if !defined(CSCAN) + wl_iw_free_ss_cache(); + wl_iw_run_ss_cache_timer(0); + + g_ss_cache_ctrl.m_link_down = 1; +#endif + memset(g_scan, 0, G_SCAN_RESULTS); + g_scan_specified_ssid = 0; + + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE; + g_first_counter_scans = 0; +#endif + +#if defined(BCMLXSDMMC) + sdioh_stop(NULL); +#endif + + net_os_set_dtim_skip(dev, 0); + + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); + + wl_iw_send_priv_event(dev, "STOP"); + } + + mutex_unlock(&wl_start_lock); + + WL_TRACE(("Exited %s\n", __FUNCTION__)); + + return ret; +} + +static int +wl_iw_control_wl_on( + struct net_device *dev, + struct iw_request_info *info +) +{ + int ret = 0; + + WL_TRACE(("Enter %s \n", __FUNCTION__)); + + ret = wl_control_wl_start(dev); + + wl_iw_send_priv_event(dev, "START"); + +#ifdef SOFTAP + if (!ap_fw_loaded) { + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); + } +#else + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); +#endif + + WL_TRACE(("Exited %s \n", __FUNCTION__)); + + return ret; +} + +#ifdef SOFTAP +static struct ap_profile my_ap; +static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap); +static int get_assoc_sta_list(struct net_device *dev, char *buf, int len); +static int set_ap_mac_list(struct net_device *dev, char *buf); + +#define PTYPE_STRING 0 +#define PTYPE_INTDEC 1 +#define PTYPE_INTHEX 2 +#define PTYPE_STR_HEX 3 +int get_parmeter_from_string( + char **str_ptr, const 
char *token, int param_type, void *dst, int param_max_len); + +#endif + +int hex2num(char c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + return -1; +} + +int hex2byte(const char *hex) +{ + int a, b; + a = hex2num(*hex++); + if (a < 0) + return -1; + b = hex2num(*hex++); + if (b < 0) + return -1; + return (a << 4) | b; +} + + + +int hstr_2_buf(const char *txt, u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) { + int a, b; + + a = hex2num(*txt++); + if (a < 0) + return -1; + b = hex2num(*txt++); + if (b < 0) + return -1; + *buf++ = (a << 4) | b; + } + + return 0; +} + +#ifdef SOFTAP +int init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg) +{ + char *str_ptr = param_str; + char sub_cmd[16]; + int ret = 0; + + memset(sub_cmd, 0, sizeof(sub_cmd)); + memset(ap_cfg, 0, sizeof(struct ap_profile)); + + if (get_parmeter_from_string(&str_ptr, "ASCII_CMD=", + PTYPE_STRING, sub_cmd, SSID_LEN) != 0) { + return -1; + } + if (strncmp(sub_cmd, "AP_CFG", 6)) { + WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd)); + return -1; + } + + ret = get_parmeter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN); + + ret |= get_parmeter_from_string(&str_ptr, "SEC=", PTYPE_STRING, ap_cfg->sec, SEC_LEN); + + ret |= get_parmeter_from_string(&str_ptr, "KEY=", PTYPE_STRING, ap_cfg->key, KEY_LEN); + + ret |= get_parmeter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5); + + ret |= get_parmeter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5); + + ret |= get_parmeter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC, &ap_cfg->max_scb, 5); + + return ret; +} +#endif + + +#ifdef SOFTAP +static int iwpriv_set_ap_config(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + char *extra = NULL; + struct ap_profile *ap_cfg = &my_ap; + + WL_TRACE(("> Got IWPRIV SET_AP IOCTL: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + char *str_ptr; + + if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + kfree(extra); + return -EFAULT; + } + + extra[wrqu->data.length] = 0; + WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra)); + + memset(ap_cfg, 0, sizeof(struct ap_profile)); + + str_ptr = extra; + + if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) { + WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res)); + kfree(extra); + return -1; + } + + } else { + WL_ERROR(("IWPRIV argument len = 0 \n")); + return -1; + } + + if ((res = set_ap_cfg(dev, ap_cfg)) < 0) + WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res)); + + kfree(extra); + + return res; +} +#endif + + +#ifdef SOFTAP +static int iwpriv_get_assoc_list(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *p_iwrq, + char *extra) +{ + int i, ret = 0; + char mac_buf[256]; + struct maclist *sta_maclist = (struct maclist *)mac_buf; + + char mac_lst[256]; + char *p_mac_str; + + WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d, \ + iwp.len:%p, iwp.flags:%x \n", __FUNCTION__, info->cmd, info->flags, \ + extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags)); + + WL_SOFTAP(("extra:%s\n", extra)); + print_buf((u8 *)p_iwrq, 16, 0); + + 
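/*
 * hstr_2_buf() above is what turns the ASCII MAC strings carried by the
 * SoftAP and MAC-filter private commands into raw bytes: every two hex
 * characters become one output byte.  A short usage sketch (the MAC
 * value and wrapper name are illustrative):
 */
static int sketch_parse_mac_string(u8 mac[6])
{
        /* "0016b8cafe01" -> 00:16:b8:ca:fe:01; returns 0 on success */
        return hstr_2_buf("0016b8cafe01", mac, 6);
}
/*
 * init_ap_profile_from_string() above expects a flat ASCII parameter
 * string built from the tokens it searches for, along the lines of
 * "ASCII_CMD=AP_CFG,SSID=MyAp,SEC=wpa2-psk,KEY=12345678,CHANNEL=6,..."
 * (the separator and the SEC/KEY spellings are assumptions drawn from
 * the token names, not from this patch).
 */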
memset(sta_maclist, 0, sizeof(mac_buf)); + + sta_maclist->count = 8; + + WL_TRACE((" net device:%s, buf_sz:%d\n", dev->name, sizeof(mac_buf))); + get_assoc_sta_list(dev, mac_buf, 256); + WL_TRACE((" got %d stations\n", sta_maclist->count)); + + memset(mac_lst, 0, sizeof(mac_lst)); + p_mac_str = mac_lst; + + for (i = 0; i < 8; i++) { + struct ether_addr *id = &sta_maclist->ea[i]; + + WL_SOFTAP(("dhd_drv>> sta_mac[%d] :", i)); + print_buf((unsigned char *)&sta_maclist->ea[i], 6, 0); + + p_mac_str += snprintf(p_mac_str, MAX_WX_STRING, + "Mac[%d]=%02X:%02X:%02X:%02X:%02X:%02X\n", i, + id->octet[0], id->octet[1], id->octet[2], + id->octet[3], id->octet[4], id->octet[5]); + + } + + p_iwrq->data.length = strlen(mac_lst); + + WL_TRACE(("u.pointer:%p\n", p_iwrq->data.pointer)); + WL_TRACE(("resulting str:\n%s \n len:%d\n\n", mac_lst, p_iwrq->data.length)); + + if (p_iwrq->data.length) { + if (copy_to_user(p_iwrq->data.pointer, mac_lst, p_iwrq->data.length)) { + WL_ERROR(("%s: Can't copy to user\n", __FUNCTION__)); + return -EFAULT; + } + } + + WL_TRACE(("Exited %s \n", __FUNCTION__)); + return ret; +} +#endif + + +#ifdef SOFTAP +static int iwpriv_set_mac_filters(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + + int i, ret = -1; + char *extra = NULL; + u8 macfilt[8][6]; + int mac_cnt = 0; + char sub_cmd[16]; + + WL_TRACE((">>> Got IWPRIV SET_MAC_FILTER IOCTL: info->cmd:%x, \ + info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + char *str_ptr; + + if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + kfree(extra); + return -EFAULT; + } + + extra[wrqu->data.length] = 0; + WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra)); + + memset(macfilt, 0, sizeof(macfilt)); + memset(sub_cmd, 0, sizeof(sub_cmd)); + + str_ptr = extra; + + if (get_parmeter_from_string(&str_ptr, "ASCII_CMD=", PTYPE_STRING, sub_cmd, 15) != 0) { + goto exit_proc; + } + +#define MAC_FILT_MAX 8 + + if (strncmp(sub_cmd, "MAC_FLT_W", strlen("MAC_FLT_W"))) { + WL_ERROR(("ERROR: sub_cmd:%s != 'MAC_FLT_W'!\n", sub_cmd)); + goto exit_proc; + } + + if (get_parmeter_from_string(&str_ptr, "MAC_CNT=", + PTYPE_INTDEC, &mac_cnt, 4) != 0) { + WL_ERROR(("ERROR: MAC_CNT param is missing \n")); + goto exit_proc; + } + + if (mac_cnt > MAC_FILT_MAX) { + WL_ERROR(("ERROR: number of MAC filters > MAX\n")); + goto exit_proc; + } + + for (i = 0; i < mac_cnt; i++) { + if (get_parmeter_from_string(&str_ptr, "MAC=", + PTYPE_STR_HEX, macfilt[i], 12) != 0) { + WL_ERROR(("ERROR: MAC_filter[%d] is missing !\n", i)); + goto exit_proc; + } + } + + for (i = 0; i < mac_cnt; i++) { + WL_SOFTAP(("mac_filt[%d]:", i)); + print_buf(macfilt[i], 6, 0); + } + + wrqu->data.pointer = NULL; + wrqu->data.length = 0; + ret = 0; + + } else { + WL_ERROR(("IWPRIV argument len is 0\n")); + return -1; + } + + exit_proc: + kfree(extra); + return ret; +} +#endif + +#endif + +#if WIRELESS_EXT < 13 +struct iw_request_info +{ + __u16 cmd; + __u16 flags; +}; + +typedef int (*iw_handler)(struct net_device *dev, + struct iw_request_info *info, + void *wrqu, + char *extra); +#endif + +static int +wl_iw_config_commit( + struct net_device *dev, + struct iw_request_info *info, + void *zwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + struct sockaddr bssid; + + WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); + + if ((error = 
dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) + return error; + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + if (!ssid.SSID_len) + return 0; + + bzero(&bssid, sizeof(struct sockaddr)); + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID)); + return error; + } + + return 0; +} + +static int +wl_iw_get_name( + struct net_device *dev, + struct iw_request_info *info, + char *cwrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); + + strcpy(cwrq, "IEEE 802.11-DS"); + + return 0; +} + +static int +wl_iw_set_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + int error, chan; + uint sf = 0; + + WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__)); + return 0; + } +#endif + + + if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { + chan = fwrq->m; + } + + + else { + + if (fwrq->e >= 6) { + fwrq->e -= 6; + while (fwrq->e--) + fwrq->m *= 10; + } else if (fwrq->e < 6) { + while (fwrq->e++ < 6) + fwrq->m /= 10; + } + + if (fwrq->m > 4000 && fwrq->m < 5000) + sf = WF_CHAN_FACTOR_4_G; + + chan = wf_mhz2channel(fwrq->m, sf); + } + chan = htod32(chan); + if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) + return error; + + g_wl_iw_params.target_channel = chan; + + return -EINPROGRESS; +} + +static int +wl_iw_get_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + channel_info_t ci; + int error; + + WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + + fwrq->m = dtoh32(ci.hw_channel); + fwrq->e = dtoh32(0); + return 0; +} + +static int +wl_iw_set_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int infra = 0, ap = 0, error = 0; + + WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); + + switch (*uwrq) { + case IW_MODE_MASTER: + infra = ap = 1; + break; + case IW_MODE_ADHOC: + case IW_MODE_AUTO: + break; + case IW_MODE_INFRA: + infra = 1; + break; + default: + return -EINVAL; + } + infra = htod32(infra); + ap = htod32(ap); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) + return error; + + + return -EINPROGRESS; +} + +static int +wl_iw_get_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int error, infra = 0, ap = 0; + + WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) + return error; + + infra = dtoh32(infra); + ap = dtoh32(ap); + *uwrq = infra ? ap ? 
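/*
 * SIOCSIWFREQ hands wl_iw_set_freq() above a mantissa/exponent pair
 * (value = m * 10^e).  Small bare numbers are taken as channel indices;
 * anything else is normalised to MHz (e == 6) before wf_mhz2channel()
 * resolves the channel.  The normalisation step in isolation, with a
 * worked example (sketch only):
 */
static int sketch_iwfreq_to_mhz(const struct iw_freq *f)
{
        int m = f->m, e = f->e;

        while (e > 6) { m *= 10; e--; }         /* scale up to MHz units  */
        while (e < 6) { m /= 10; e++; }         /* or scale down to MHz   */

        return m;   /* e.g. m=241200, e=4 -> 2412 MHz -> 2.4 GHz channel 1 */
}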
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; + + return 0; +} + +static int +wl_iw_get_range( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + struct iw_range *range = (struct iw_range *) extra; + wl_uint32_list_t *list; + wl_rateset_t rateset; + int8 *channels; + int error, i, k; + uint sf, ch; + + int phytype; + int bw_cap = 0, sgi_tx = 0, nmode = 0; + channel_info_t ci; + uint8 nrate_list2copy = 0; + uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, + {14, 29, 43, 58, 87, 116, 130, 144}, + {27, 54, 81, 108, 162, 216, 243, 270}, + {30, 60, 90, 120, 180, 240, 270, 300}}; + + WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); + + if (!extra) + return -EINVAL; + + channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL); + if (!channels) { + WL_ERROR(("Could not alloc channels\n")); + return -ENOMEM; + } + list = (wl_uint32_list_t *)channels; + + dwrq->length = sizeof(struct iw_range); + memset(range, 0, sizeof(range)); + + range->min_nwid = range->max_nwid = 0; + + list->count = htod32(MAXCHANNEL); + if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) { + kfree(channels); + return error; + } + for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { + range->freq[i].i = dtoh32(list->element[i]); + + ch = dtoh32(list->element[i]); + if (ch <= CH_MAX_2G_CHANNEL) + sf = WF_CHAN_FACTOR_2_4_G; + else + sf = WF_CHAN_FACTOR_5_G; + + range->freq[i].m = wf_channel2mhz(ch, sf); + range->freq[i].e = 6; + } + range->num_frequency = range->num_channels = i; + + range->max_qual.qual = 5; + + range->max_qual.level = 0x100 - 200; + + range->max_qual.noise = 0x100 - 200; + + range->sensitivity = 65535; + +#if WIRELESS_EXT > 11 + + range->avg_qual.qual = 3; + + range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; + + range->avg_qual.noise = 0x100 - 75; +#endif + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) { + kfree(channels); + return error; + } + rateset.count = dtoh32(rateset.count); + range->num_bitrates = rateset.count; + for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) + range->bitrate[i] = (rateset.rates[i]& 0x7f) * 500000; + dev_wlc_intvar_get(dev, "nmode", &nmode); + dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)); + + if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) { + dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap); + dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx); + dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)); + ci.hw_channel = dtoh32(ci.hw_channel); + + if (bw_cap == 0 || + (bw_cap == 2 && ci.hw_channel <= 14)) { + if (sgi_tx == 0) + nrate_list2copy = 0; + else + nrate_list2copy = 1; + } + if (bw_cap == 1 || + (bw_cap == 2 && ci.hw_channel >= 36)) { + if (sgi_tx == 0) + nrate_list2copy = 2; + else + nrate_list2copy = 3; + } + range->num_bitrates += 8; + for (k = 0; i < range->num_bitrates; k++, i++) { + + range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; + } + } + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) { + kfree(channels); + return error; + } + i = dtoh32(i); + if (i == WLC_PHY_TYPE_A) + range->throughput = 24000000; + else + range->throughput = 1500000; + + range->min_rts = 0; + range->max_rts = 2347; + range->min_frag = 256; + range->max_frag = 2346; + + range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; + range->num_encoding_sizes = 4; + range->encoding_size[0] = WEP1_KEY_SIZE; + range->encoding_size[1] = WEP128_KEY_SIZE; +#if WIRELESS_EXT > 17 + 
range->encoding_size[2] = TKIP_KEY_SIZE; +#else + range->encoding_size[2] = 0; +#endif + range->encoding_size[3] = AES_KEY_SIZE; + + range->min_pmp = 0; + range->max_pmp = 0; + range->min_pmt = 0; + range->max_pmt = 0; + range->pmp_flags = 0; + range->pm_capa = 0; + + range->num_txpower = 2; + range->txpower[0] = 1; + range->txpower[1] = 255; + range->txpower_capa = IW_TXPOW_MWATT; + +#if WIRELESS_EXT > 10 + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 19; + + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT; + range->r_time_flags = 0; + + range->min_retry = 1; + range->max_retry = 255; + + range->min_r_time = 0; + range->max_r_time = 0; +#endif + +#if WIRELESS_EXT > 17 + range->enc_capa = IW_ENC_CAPA_WPA; + range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; + range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; +#ifdef BCMWPA2 + range->enc_capa |= IW_ENC_CAPA_WPA2; +#endif + + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); + IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); +#ifdef BCMWPA2 + IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); +#endif +#endif + + kfree(channels); + + return 0; +} + +static int +rssi_to_qual(int rssi) +{ + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + return 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + return 1; + else if (rssi <= WL_IW_RSSI_LOW) + return 2; + else if (rssi <= WL_IW_RSSI_GOOD) + return 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + return 4; + else + return 5; +} + +static int +wl_iw_set_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + int i; + + WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); + for (i = 0; i < iw->spy_num; i++) + memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); + memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); + + return 0; +} + +static int +wl_iw_get_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num]; + int i; + + WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = iw->spy_num; + for (i = 0; i < iw->spy_num; i++) { + memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); + addr[i].sa_family = AF_UNIX; + memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); + iw->spy_qual[i].updated = 0; + } + + return 0; +} + + +static int +wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size) +{ + chanspec_t chanspec = 0; + + if (ch != 0) { + + join_params->params.chanspec_num = 1; + join_params->params.chanspec_list[0] = ch; + + if (join_params->params.chanspec_list[0]) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + + join_params->params.chanspec_num * sizeof(chanspec_t); + + join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + join_params->params.chanspec_list[0] |= chanspec; + join_params->params.chanspec_list[0] = + 
htodchanspec(join_params->params.chanspec_list[0]); + + join_params->params.chanspec_num = htod32(join_params->params.chanspec_num); + + WL_TRACE(("%s join_params->params.chanspec_list[0]= %X\n", \ + __FUNCTION__, join_params->params.chanspec_list[0])); + } + return 1; +} + +static int +wl_iw_set_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + int error = -EINVAL; + wl_join_params_t join_params; + int join_params_size; + + WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); + + if (awrq->sa_family != ARPHRD_ETHER) { + WL_ERROR(("Invalid Header...sa_family\n")); + return -EINVAL; + } + + + if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { + scb_val_t scbval; + + bzero(&scbval, sizeof(scb_val_t)); + + (void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + return 0; + } + + + + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid); + + memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len); + join_params.ssid.SSID_len = htod32(g_ssid.SSID_len); + memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN); + + WL_TRACE(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel)); + wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) { + WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error)); + return error; + } + + if (g_ssid.SSID_len) { + WL_TRACE(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__, \ + g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data), \ + g_wl_iw_params.target_channel)); + } + + + memset(&g_ssid, 0, sizeof(g_ssid)); + return 0; +} + +static int +wl_iw_get_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); + + awrq->sa_family = ARPHRD_ETHER; + memset(awrq->sa_data, 0, ETHER_ADDR_LEN); + + + (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_mlme( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + struct iw_mlme *mlme; + scb_val_t scbval; + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name)); + + mlme = (struct iw_mlme *)extra; + if (mlme == NULL) { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + scbval.val = mlme->reason_code; + bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN); + + if (mlme->cmd == IW_MLME_DISASSOC) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + } + else if (mlme->cmd == IW_MLME_DEAUTH) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, + sizeof(scb_val_t)); + } + else { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + return error; +} +#endif + +#ifndef WL_IW_USE_ISCAN +static int +wl_iw_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int error, i; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + list = kmalloc(buflen, GFP_KERNEL); + if (!list) + return -ENOMEM; + memset(list, 0, buflen); + 
list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + WL_ERROR(("%d: Scan results error %d\n", __LINE__, error)); + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \ + __FUNCTION__, list->version)); + kfree(list); + return -EINVAL; + } + + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif + + dwrq->length++; + } + + kfree(list); + + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + + dwrq->flags = 1; + } + return 0; +} +#endif + +#ifdef WL_IW_USE_ISCAN +static int +wl_iw_iscan_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + iscan_buf_t * buf; + iscan_info_t *iscan = g_iscan; + + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int i; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((!iscan) || (iscan->sysioc_pid < 0)) { + WL_ERROR(("%s error\n", __FUNCTION__)); + return 0; + } + + buf = iscan->list_hdr; + + while (buf) { + list = &((wl_iscan_results_t*)buf->iscan_buf)->results; + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \ + __FUNCTION__, list->version)); + return -EINVAL; + } + + bi = NULL; + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
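/*
 * wl_scan_results_t is a packed, variable-length structure: each
 * wl_bss_info_t entry records its own length and the next entry starts
 * immediately after it, which is why the aplist handlers advance with
 * the "(uintptr)bi + dtoh32(bi->length)" hop.  The walk in isolation
 * (a sketch assuming the list is still in dongle byte order; the bounds
 * checking done by the callers is omitted):
 */
static void sketch_walk_scan_results(wl_scan_results_t *list)
{
        wl_bss_info_t *bi = NULL;
        uint32 i;

        for (i = 0; i < dtoh32(list->count); i++) {
                /* first entry sits right after the header, then hop by length */
                bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
                        : list->bss_info;
                /* ... consume bi->SSID, bi->BSSID, bi->RSSI here ... */
        }
}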
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) + : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif + + dwrq->length++; + } + buf = buf->next; + } + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + + dwrq->flags = 1; + } + return 0; +} + +static int +wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) +{ + int err = 0; + + memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + if (ssid && ssid->SSID_len) + memcpy(¶ms->ssid, ssid, sizeof(wlc_ssid_t)); + + return err; +} + +static int +wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action) +{ + int err = 0; + + iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION); + iscan->iscan_ex_params_p->action = htod16(action); + iscan->iscan_ex_params_p->scan_duration = htod16(0); + + WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes)); + WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time)); + WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time)); + WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time)); + WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type)); + WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type)); + + + if ((err = dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p, \ + iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) { + WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err)); + err = -1; + } + + return err; +} + +static void +wl_iw_timerfunc(ulong data) +{ + iscan_info_t *iscan = (iscan_info_t *)data; + if (iscan) { + iscan->timer_on = 0; + if (iscan->iscan_state != ISCAN_STATE_IDLE) { + WL_SCAN(("timer trigger\n")); + up(&iscan->sysioc_sem); + } + } +} +static void wl_iw_set_event_mask(struct net_device *dev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; + + dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf)); + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + setbit(eventmask, WLC_E_SCAN_COMPLETE); + dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, + iovbuf, sizeof(iovbuf)); +} + +static uint32 +wl_iw_iscan_get(iscan_info_t *iscan) +{ + iscan_buf_t * buf; + iscan_buf_t * ptr; + wl_iscan_results_t * list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + uint32 status; + int res; + + mutex_lock(&wl_cache_lock); + if (iscan->list_cur) { + buf = iscan->list_cur; + iscan->list_cur = buf->next; + } + else { + buf = kmalloc(sizeof(iscan_buf_t), 
GFP_KERNEL); + if (!buf) { + WL_ERROR(("%s can't alloc iscan_buf_t : going to abort currect iscan\n", \ + __FUNCTION__)); + mutex_unlock(&wl_cache_lock); + return WL_SCAN_RESULTS_NO_MEM; + } + buf->next = NULL; + if (!iscan->list_hdr) + iscan->list_hdr = buf; + else { + ptr = iscan->list_hdr; + while (ptr->next) { + ptr = ptr->next; + } + ptr->next = buf; + } + } + memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)buf->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + res = dev_iw_iovar_getbuf( + iscan->dev, + "iscanresults", + &list, + WL_ISCAN_RESULTS_FIXED_SIZE, + buf->iscan_buf, + WLC_IW_ISCAN_MAXLEN); + if (res == 0) { + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + results->count = dtoh32(results->count); + WL_SCAN(("results->count = %d\n", results->count)); + + WL_SCAN(("results->buflen = %d\n", results->buflen)); + status = dtoh32(list_buf->status); + } else { + WL_ERROR(("%s returns error %d\n", __FUNCTION__, res)); + status = WL_SCAN_RESULTS_NO_MEM; + } + mutex_unlock(&wl_cache_lock); + return status; +} + +static void wl_iw_force_specific_scan(iscan_info_t *iscan) +{ + WL_TRACE(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + (void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif +} + +static void wl_iw_send_scan_complete(iscan_info_t *iscan) +{ +#ifndef SANDGATE2G + union iwreq_data wrqu; + + memset(&wrqu, 0, sizeof(wrqu)); + + wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); + if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY; + WL_TRACE(("Send Event ISCAN complete\n")); +#endif +} + +static int +_iscan_sysioc_thread(void *data) +{ + uint32 status; + iscan_info_t *iscan = (iscan_info_t *)data; + static bool iscan_pass_abort = FALSE; + + DAEMONIZE("iscan_sysioc"); + + status = WL_SCAN_RESULTS_PARTIAL; + while (down_interruptible(&iscan->sysioc_sem) == 0) { + + net_os_wake_lock(iscan->dev); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_SCAN(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__)); + net_os_wake_unlock(iscan->dev); + continue; + } +#endif + + if (iscan->timer_on) { + iscan->timer_on = 0; + del_timer_sync(&iscan->timer); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + status = wl_iw_iscan_get(iscan); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) { + WL_SCAN(("%s Get results from specific scan status=%d\n", __FUNCTION__, status)); + wl_iw_send_scan_complete(iscan); + iscan_pass_abort = FALSE; + status = -1; + } + + switch (status) { + case WL_SCAN_RESULTS_PARTIAL: + WL_SCAN(("iscanresults incomplete\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + + wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_SUCCESS: + WL_SCAN(("iscanresults complete\n")); + 
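/*
 * The incremental-scan machinery here is driven by a timer-plus-semaphore
 * kick: wl_iw_timerfunc() only up()s a semaphore, and
 * _iscan_sysioc_thread() sleeps in down_interruptible(), pulls a batch of
 * partial results, and re-arms the timer for as long as the firmware
 * keeps answering WL_SCAN_RESULTS_PARTIAL.  The bare pattern, stripped of
 * the driver specifics (all names below are illustrative):
 */
struct sketch_poller {
        struct timer_list timer;
        struct semaphore kick;
};

static void sketch_poll_timerfunc(unsigned long data)
{
        struct sketch_poller *sp = (struct sketch_poller *)data;

        up(&sp->kick);                          /* wake the worker thread */
}

static int sketch_poll_thread(void *data)
{
        struct sketch_poller *sp = data;

        while (down_interruptible(&sp->kick) == 0) {
                /* ... fetch one batch of partial results here ... */

                /* not done yet: ask to be kicked again in two seconds */
                mod_timer(&sp->timer, jiffies + msecs_to_jiffies(2000));
        }
        return 0;
}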
iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + case WL_SCAN_RESULTS_PENDING: + WL_SCAN(("iscanresults pending\n")); + + mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_ABORTED: + WL_SCAN(("iscanresults aborted\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + if (g_scan_specified_ssid == 0) + wl_iw_send_scan_complete(iscan); + else { + iscan_pass_abort = TRUE; + wl_iw_force_specific_scan(iscan); + } + break; + case WL_SCAN_RESULTS_NO_MEM: + WL_SCAN(("iscanresults can't alloc memory: skip\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + break; + default: + WL_SCAN(("iscanresults returned unknown status %d\n", status)); + break; + } + + net_os_wake_unlock(iscan->dev); + } + + if (iscan->timer_on) { + iscan->timer_on = 0; + del_timer_sync(&iscan->timer); + } + + complete_and_exit(&iscan->sysioc_exited, 0); +} +#endif + +#if !defined(CSCAN) + +static void +wl_iw_set_ss_cache_timer_flag(void) +{ + g_ss_cache_ctrl.m_timer_expired = 1; + WL_TRACE(("%s called\n", __FUNCTION__)); +} + +static int +wl_iw_init_ss_cache_ctrl(void) +{ + WL_TRACE(("%s :\n", __FUNCTION__)); + g_ss_cache_ctrl.m_prev_scan_mode = 0; + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + g_ss_cache_ctrl.m_cache_head = NULL; + g_ss_cache_ctrl.m_link_down = 0; + g_ss_cache_ctrl.m_timer_expired = 0; + memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN); + + g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL); + if (!g_ss_cache_ctrl.m_timer) { + return -ENOMEM; + } + g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag; + init_timer(g_ss_cache_ctrl.m_timer); + + return 0; +} + + + +static void +wl_iw_free_ss_cache(void) +{ + wl_iw_ss_cache_t *node, *cur; + wl_iw_ss_cache_t **spec_scan_head; + + WL_TRACE(("%s called\n", __FUNCTION__)); + + mutex_lock(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + + for (;node;) { + WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID)); + cur = node; + node = cur->next; + kfree(cur); + } + *spec_scan_head = NULL; + mutex_unlock(&wl_cache_lock); +} + + + +static int +wl_iw_run_ss_cache_timer(int kick_off) +{ + struct timer_list **timer; + + timer = &g_ss_cache_ctrl.m_timer; + + if (*timer) { + if (kick_off) { + (*timer)->expires = jiffies + 30000 * HZ / 1000; + add_timer(*timer); + WL_TRACE(("%s : timer starts \n", __FUNCTION__)); + } else { + del_timer_sync(*timer); + WL_TRACE(("%s : timer stops \n", __FUNCTION__)); + } + } + + return 0; +} + + +void +wl_iw_release_ss_cache_ctrl(void) +{ + WL_TRACE(("%s :\n", __FUNCTION__)); + wl_iw_free_ss_cache(); + wl_iw_run_ss_cache_timer(0); + if (g_ss_cache_ctrl.m_timer) { + kfree(g_ss_cache_ctrl.m_timer); + } +} + + + +static void +wl_iw_reset_ss_cache(void) +{ + wl_iw_ss_cache_t *node, *prev, *cur; + wl_iw_ss_cache_t **spec_scan_head; + + mutex_lock(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + prev = node; + + for (;node;) { + WL_TRACE(("%s : node SSID %s \n", __FUNCTION__, node->bss_info->SSID)); + if (!node->dirty) { + cur = node; + if (cur == *spec_scan_head) { + *spec_scan_head = cur->next; + prev = *spec_scan_head; + } + else { + prev->next = cur->next; + } + node = cur->next; + + WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID)); + kfree(cur); + continue; + } + + node->dirty = 0; + prev = node; + node = node->next; + } + mutex_unlock(&wl_cache_lock); +} + + +static int 
+wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list) +{ + + wl_iw_ss_cache_t *node, *prev, *leaf; + wl_iw_ss_cache_t **spec_scan_head; + wl_bss_info_t *bi = NULL; + int i; + + if (!ss_list->count) { + return 0; + } + + mutex_lock(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + + for (i = 0; i < ss_list->count; i++) { + + node = *spec_scan_head; + prev = node; + + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; + + WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID)); + for (;node;) { + if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { + + WL_TRACE(("dirty marked : SSID %s\n", bi->SSID)); + node->dirty = 1; + break; + } + prev = node; + node = node->next; + } + + if (node) { + continue; + } + leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL); + if (!leaf) { + WL_ERROR(("Memory alloc failure %d\n", \ + bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)); + mutex_unlock(&wl_cache_lock); + return -ENOMEM; + } + + memcpy(leaf->bss_info, bi, bi->length); + leaf->next = NULL; + leaf->dirty = 1; + leaf->count = 1; + leaf->version = ss_list->version; + + if (!prev) { + *spec_scan_head = leaf; + } + else { + prev->next = leaf; + } + } + mutex_unlock(&wl_cache_lock); + return 0; +} + + +static int +wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user, +__u16 *merged_len) +{ + wl_iw_ss_cache_t *node; + wl_scan_results_t *list_merge; + + mutex_lock(&wl_cache_lock); + node = g_ss_cache_ctrl.m_cache_head; + for (;node;) { + list_merge = (wl_scan_results_t *)node; + WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count)); + if (buflen_from_user - *merged_len > 0) { + *merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra + *merged_len, buflen_from_user - *merged_len); + } + else { + WL_TRACE(("%s: exit with break\n", __FUNCTION__)); + break; + } + node = node->next; + } + mutex_unlock(&wl_cache_lock); + return 0; +} + + +static int +wl_iw_delete_bss_from_ss_cache(void *addr) +{ + + wl_iw_ss_cache_t *node, *prev; + wl_iw_ss_cache_t **spec_scan_head; + + mutex_lock(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + prev = node; + for (;node;) { + if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) { + if (node == *spec_scan_head) { + *spec_scan_head = node->next; + } + else { + prev->next = node->next; + } + + WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID)); + kfree(node); + break; + } + + prev = node; + node = node->next; + } + + memset(addr, 0, ETHER_ADDR_LEN); + mutex_unlock(&wl_cache_lock); + return 0; +} + +#endif + + +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error; + WL_TRACE(("%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name)); + +#if defined(CSCAN) + WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__)); + return -EINVAL; +#endif + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + return 0; + } +#endif + + if (g_onoff == G_WLAN_SET_OFF) + return 0; + + memset(&g_specific_ssid, 0, sizeof(g_specific_ssid)); +#ifndef WL_IW_USE_ISCAN + g_scan_specified_ssid = 0; +#endif + +#if WIRELESS_EXT > 17 + + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct 
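/*
 * The specific-scan cache above ages its entries with a mark-and-sweep:
 * wl_iw_add_bss_to_ss_cache() marks every BSS present in the latest
 * results (dirty = 1), and wl_iw_reset_ss_cache() frees whatever was not
 * re-marked, then clears the marks for the next round.  The sweep
 * expressed with a pointer-to-pointer walk (a compact sketch of the same
 * logic, not the driver's code):
 */
struct sketch_cache_node {
        struct sketch_cache_node *next;
        int dirty;
        /* ... cached bss_info payload ... */
};

static void sketch_cache_sweep(struct sketch_cache_node **head)
{
        struct sketch_cache_node **pp = head, *node;

        while ((node = *pp) != NULL) {
                if (!node->dirty) {             /* not seen this round: drop it */
                        *pp = node->next;
                        kfree(node);
                        continue;
                }
                node->dirty = 0;                /* survivor: clear the mark */
                pp = &node->next;
        }
}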
iw_scan_req *)extra; + if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + WL_TRACE(("%s Ignoring SC %s first BC is not done = %d\n", \ + __FUNCTION__, req->essid, \ + g_first_broadcast_scan)); + return -EBUSY; + } + if (g_scan_specified_ssid) { + WL_TRACE(("%s Specific SCAN is not done ignore scan for = %s \n", \ + __FUNCTION__, req->essid)); + return -EBUSY; + } + else { + g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID), \ + req->essid_len); + memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len); + g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len); + g_scan_specified_ssid = 1; + WL_TRACE(("### Specific scan ssid=%s len=%d\n", \ + g_specific_ssid.SSID, g_specific_ssid.SSID_len)); + } + } + } +#endif + + if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) { + WL_TRACE(("#### Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error)); + g_scan_specified_ssid = 0; + return -EBUSY; + } + + return 0; +} + +#ifdef WL_IW_USE_ISCAN +int +wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + + if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) { + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED; + WL_TRACE(("%s: First Brodcast scan was forced\n", __FUNCTION__)); + } + else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) { + WL_TRACE(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__)); + return 0; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (flag) + rtnl_lock(); +#endif + + dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag)); + wl_iw_set_event_mask(dev); + + WL_TRACE(("+++: Set Broadcast ISCAN\n")); + + memset(&ssid, 0, sizeof(ssid)); + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + + memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size); + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid); + wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (flag) + rtnl_unlock(); +#endif + + mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000); + + iscan->timer_on = 1; + + return 0; +} + +static int +wl_iw_iscan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + int ret = 0; + + WL_TRACE(("%s: SIOCSIWSCAN : ISCAN\n", dev->name)); + +#if defined(CSCAN) + WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__)); + return -EINVAL; +#endif + + net_os_wake_lock(dev); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + goto set_scan_end; + } +#endif + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto set_scan_end; + } + +#ifdef PNO_SUPPORT + if (dhd_dev_get_pno_status(dev)) { + WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__)); + } +#endif + + if ((!iscan) || (iscan->sysioc_pid < 0)) { + WL_ERROR(("%s error\n", __FUNCTION__)); + goto set_scan_end; + } + + if (g_scan_specified_ssid) { + WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n", \ + __FUNCTION__)); + ret = EBUSY; + goto set_scan_end; + } + + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if 
(wrqu->data.flags & IW_SCAN_THIS_ESSID) { + int as = 0; + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as)); + wl_iw_set_event_mask(dev); + ret = wl_iw_set_scan(dev, info, wrqu, extra); + goto set_scan_end; + } + else { + g_scan_specified_ssid = 0; + + if (iscan->iscan_state == ISCAN_STATE_SCANING) { + WL_TRACE(("%s ISCAN already in progress \n", __FUNCTION__)); + goto set_scan_end; + } + } + } +#endif + +#if !defined(CSCAN) + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { + + WL_ERROR(("%s Clean up First scan flag which is %d\n", \ + __FUNCTION__, g_first_broadcast_scan)); + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; + } + else { + WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n", \ + __FUNCTION__, g_first_counter_scans)); + ret = -EBUSY; + goto set_scan_end; + } + } +#endif + + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); + +set_scan_end: + net_os_wake_unlock(dev); + return ret; +} +#endif + +#if WIRELESS_EXT > 17 +static bool +ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len) +{ + uint8 *ie = *wpaie; + + if ((ie[1] >= 6) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) { + return TRUE; + } + + ie += ie[1] + 2; + + *tlvs_len -= (int)(ie - *tlvs); + + *tlvs = ie; + return FALSE; +} + +static bool +ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len) +{ + uint8 *ie = *wpsie; + + if ((ie[1] >= 4) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) { + return TRUE; + } + + ie += ie[1] + 2; + + *tlvs_len -= (int)(ie - *tlvs); + + *tlvs = ie; + return FALSE; +} +#endif + +static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, + size_t len, int uppercase) +{ + size_t i; + char *pos = buf, *end = buf + buf_size; + int ret; + if (buf_size == 0) + return 0; + for (i = 0; i < len; i++) { + ret = snprintf(pos, end - pos, uppercase ? 
"%02X" : "%02x", + data[i]); + if (ret < 0 || ret >= end - pos) { + end[-1] = '\0'; + return pos - buf; + } + pos += ret; + } + end[-1] = '\0'; + return pos - buf; +} + + +int wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len) +{ + return _wpa_snprintf_hex(buf, buf_size, data, len, 0); +} + +static int +wl_iw_handle_scanresults_ies(char **event_p, char *end, + struct iw_request_info *info, wl_bss_info_t *bi) +{ +#if WIRELESS_EXT > 17 + struct iw_event iwe; + char *event; + char *buf; + int custom_event_len; + + event = *event_p; + if (bi->ie_length) { + + bcm_tlv_t *ie; + uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + int ptr_len = bi->ie_length; + +#ifdef BCMWPA2 + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); +#endif + + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + + if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr_len = bi->ie_length; + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr_len = bi->ie_length; + + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) { + WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__)); +#ifdef WAPI_IE_USE_GENIE + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); +#else + iwe.cmd = IWEVCUSTOM; + custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2); + iwe.u.data.length = custom_event_len; + + buf = kmalloc(custom_event_len+1, GFP_KERNEL); + if (buf == NULL) + { + WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len)); + break; + } + + memcpy(buf, "wapi_ie=", 8); + wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1); + wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1); + wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len); + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf); +#endif + break; + } + *event_p = event; + } +#endif + + return 0; +} + +#ifndef CSCAN +static uint +wl_iw_get_scan_prep( + wl_scan_results_t *list, + struct iw_request_info *info, + char *extra, + short max_size) +{ + int i, j; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value; + int ret = 0; + + ASSERT(list); + + for (i = 0; i < list->count && i < IW_MAX_AP; i++) + { + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \ + __FUNCTION__, list->version)); + return ret; + } + + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + + WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID)); + + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + + if (bi->rateset.count) { + if (((event -extra) + IW_EV_LCP_LEN) <= (uintptr)end) { + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + } + + if ((ret = (event - extra)) < 0) { + WL_ERROR(("==> Wrong size\n")); + ret = 0; + } + WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra))); + return (uint)ret; +} + +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + channel_info_t ci; + wl_scan_results_t *list_merge; + wl_scan_results_t *list = (wl_scan_results_t *) g_scan; + int error; + uint buflen_from_user = dwrq->length; + uint len = G_SCAN_RESULTS; + __u16 len_ret = 0; +#if !defined(CSCAN) + __u16 merged_len = 0; +#endif +#if defined(WL_IW_USE_ISCAN) + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; +#if !defined(CSCAN) + uint32 counter = 0; +#endif +#endif + WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user)); + + if (!extra) { + WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name)); + return -EINVAL; + } + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + ci.scan_channel = dtoh32(ci.scan_channel); + if (ci.scan_channel) + return -EAGAIN; + +#if !defined(CSCAN) + if (g_ss_cache_ctrl.m_timer_expired) { + wl_iw_free_ss_cache(); + g_ss_cache_ctrl.m_timer_expired ^= 1; + } + if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) || + g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + + wl_iw_reset_ss_cache(); + } + g_ss_cache_ctrl.m_prev_scan_mode = 
g_scan_specified_ssid; + if (g_scan_specified_ssid) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + } + else { + g_ss_cache_ctrl.m_cons_br_scan_cnt++; + } +#endif + + if (g_scan_specified_ssid) { + + list = kmalloc(len, GFP_KERNEL); + if (!list) { + WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name)); + g_scan_specified_ssid = 0; + return -ENOMEM; + } + } + + memset(list, 0, len); + list->buflen = htod32(len); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) { + WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error)); + dwrq->length = len; + if (g_scan_specified_ssid) { + g_scan_specified_ssid = 0; + kfree(list); + } + return 0; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + if (g_scan_specified_ssid) { + g_scan_specified_ssid = 0; + kfree(list); + } + return -EINVAL; + } + +#if !defined(CSCAN) + if (g_scan_specified_ssid) { + + wl_iw_add_bss_to_ss_cache(list); + kfree(list); + } + + mutex_lock(&wl_cache_lock); +#if defined(WL_IW_USE_ISCAN) + if (g_scan_specified_ssid) + WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count)); + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + counter += list_merge->count; + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra+len_ret, buflen_from_user -len_ret); + p_buf = p_buf->next; + } + WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter)); +#else + list_merge = (wl_scan_results_t *) g_scan; + len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user); +#endif + mutex_unlock(&wl_cache_lock); + if (g_ss_cache_ctrl.m_link_down) { + wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid); + } + + wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len); + len_ret += merged_len; + wl_iw_run_ss_cache_timer(0); + wl_iw_run_ss_cache_timer(1); +#else + + if (g_scan_specified_ssid) { + WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count)); + len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user); + kfree(list); + +#if defined(WL_IW_USE_ISCAN) + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra+len_ret, buflen_from_user -len_ret); + p_buf = p_buf->next; + } +#else + list_merge = (wl_scan_results_t *) g_scan; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret, + buflen_from_user -len_ret); +#endif + } + else { + list = (wl_scan_results_t *) g_scan; + len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user); + } +#endif + +#if defined(WL_IW_USE_ISCAN) + + g_scan_specified_ssid = 0; +#endif + + if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user) + len = len_ret; + + dwrq->length = len; + dwrq->flags = 0; + + WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, 
list->count)); + return 0; +} +#endif + +#if defined(WL_IW_USE_ISCAN) +static int +wl_iw_iscan_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int ii, j; + int apcnt; + char *event = extra, *end = extra + dwrq->length, *value; + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; + uint32 counter = 0; + uint8 channel; +#if !defined(CSCAN) + __u16 merged_len = 0; + uint buflen_from_user = dwrq->length; +#endif + + WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + return -EINVAL; + } +#endif + + if (!extra) { + WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n", dev->name)); + return -EINVAL; + } + + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) { + WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n", \ + dev->name, __FUNCTION__)); + return -EAGAIN; + } + + if ((!iscan) || (iscan->sysioc_pid < 0)) { + WL_ERROR(("%ssysioc_pid\n", __FUNCTION__)); + return -EAGAIN; + } + +#if !defined(CSCAN) + if (g_ss_cache_ctrl.m_timer_expired) { + wl_iw_free_ss_cache(); + g_ss_cache_ctrl.m_timer_expired ^= 1; + } + if (g_scan_specified_ssid) { + return wl_iw_get_scan(dev, info, dwrq, extra); + } + else { + if (g_ss_cache_ctrl.m_link_down) { + wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid); + } + if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + + wl_iw_reset_ss_cache(); + } + g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid; + g_ss_cache_ctrl.m_cons_br_scan_cnt++; + } +#endif + + WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name)); + apcnt = 0; + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + + counter += list->count; + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + return -EINVAL; + } + + bi = NULL; + for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + + if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + + IW_EV_QUAL_LEN >= end) + return -E2BIG; + + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + + iwe.cmd = SIOCGIWFREQ; + channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch; + iwe.u.freq.m = wf_channel2mhz(channel, + channel <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + + if (bi->rateset.count) { + if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) + return -E2BIG; + + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + p_buf = p_buf->next; + } + + dwrq->length = event - extra; + dwrq->flags = 0; + +#if !defined(CSCAN) + wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len); + dwrq->length += merged_len; + wl_iw_run_ss_cache_timer(0); + wl_iw_run_ss_cache_timer(1); +#endif /* CSCAN */ + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; + + WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter)); + + return 0; +} +#endif + +static int +wl_iw_set_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + int error; + wl_join_params_t join_params; + int join_params_size; + + WL_TRACE(("%s: SIOCSIWESSID\n", dev->name)); + + + memset(&g_ssid, 0, sizeof(g_ssid)); + + CHECK_EXTRA_FOR_NULL(extra); + + if (dwrq->length && extra) { +#if WIRELESS_EXT > 20 + g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length); +#else + g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1); +#endif + memcpy(g_ssid.SSID, extra, g_ssid.SSID_len); + } else { + + g_ssid.SSID_len = 0; + } + g_ssid.SSID_len = htod32(g_ssid.SSID_len); + + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid); + + memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len); + join_params.ssid.SSID_len = htod32(g_ssid.SSID_len); + memcpy(&join_params.params.bssid, ðer_bcast, ETHER_ADDR_LEN); + + wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) { + WL_ERROR(("Invalid ioctl data=%d\n", error)); + return error; + } + + if (g_ssid.SSID_len) { + WL_TRACE(("%s: join SSID=%s ch=%d\n", __FUNCTION__, \ + g_ssid.SSID, g_wl_iw_params.target_channel)); + } + return 0; +} + +static int +wl_iw_get_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { + WL_ERROR(("Error getting the SSID\n")); + return error; + } + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + memcpy(extra, ssid.SSID, ssid.SSID_len); + + dwrq->length = ssid.SSID_len; + + dwrq->flags = 1; + + return 0; +} + +static int 
+wl_iw_set_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + + WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + if (dwrq->length > sizeof(iw->nickname)) + return -E2BIG; + + memcpy(iw->nickname, extra, dwrq->length); + iw->nickname[dwrq->length - 1] = '\0'; + + return 0; +} + +static int +wl_iw_get_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + + WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + strcpy(extra, iw->nickname); + dwrq->length = strlen(extra) + 1; + + return 0; +} + +static int wl_iw_set_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + wl_rateset_t rateset; + int error, rate, i, error_bg, error_a; + + WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + + rateset.count = dtoh32(rateset.count); + + if (vwrq->value < 0) { + + rate = rateset.rates[rateset.count - 1] & 0x7f; + } else if (vwrq->value < rateset.count) { + + rate = rateset.rates[vwrq->value] & 0x7f; + } else { + + rate = vwrq->value / 500000; + } + + if (vwrq->fixed) { + + error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate); + error_a = dev_wlc_intvar_set(dev, "a_rate", rate); + + if (error_bg && error_a) + return (error_bg | error_a); + } else { + + + error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0); + + error_a = dev_wlc_intvar_set(dev, "a_rate", 0); + + if (error_bg && error_a) + return (error_bg | error_a); + + + for (i = 0; i < rateset.count; i++) + if ((rateset.rates[i] & 0x7f) > rate) + break; + rateset.count = htod32(i); + + + if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset)))) + return error; + } + + return 0; +} + +static int wl_iw_get_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rate; + + WL_TRACE(("%s: SIOCGIWRATE\n", dev->name)); + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate)))) + return error; + rate = dtoh32(rate); + vwrq->value = rate * 500000; + + return 0; +} + +static int +wl_iw_set_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCSIWRTS\n", dev->name)); + + if (vwrq->disabled) + rts = DOT11_DEFAULT_RTS_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN) + return -EINVAL; + else + rts = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts))) + return error; + + return 0; +} + +static int +wl_iw_get_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCGIWRTS\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts))) + return error; + + vwrq->value = rts; + vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, frag; + + WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name)); + + if (vwrq->disabled) + frag = DOT11_DEFAULT_FRAG_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN) + return -EINVAL; + else + 
frag = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag))) + return error; + + return 0; +} + +static int +wl_iw_get_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, fragthreshold; + + WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold))) + return error; + + vwrq->value = fragthreshold; + vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable; + uint16 txpwrmw; + WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name)); + + + disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0; + disable += WL_RADIO_SW_DISABLE << 16; + + disable = htod32(disable); + if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable)))) + return error; + + + if (disable & WL_RADIO_SW_DISABLE) + return 0; + + + if (!(vwrq->flags & IW_TXPOW_MWATT)) + return -EINVAL; + + + if (vwrq->value < 0) + return 0; + + if (vwrq->value > 0xffff) txpwrmw = 0xffff; + else txpwrmw = (uint16)vwrq->value; + + + error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw))); + return error; +} + +static int +wl_iw_get_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable, txpwrdbm; + uint8 result; + + WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) || + (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm))) + return error; + + disable = dtoh32(disable); + result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE); + vwrq->value = (int32)bcm_qdbm_to_mw(result); + vwrq->fixed = 0; + vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 
1 : 0; + vwrq->flags = IW_TXPOW_MWATT; + + return 0; +} + +#if WIRELESS_EXT > 10 +static int +wl_iw_set_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name)); + + + if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME)) + return -EINVAL; + + + if (vwrq->flags & IW_RETRY_LIMIT) { + + +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) || + !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) { +#else + if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) { +#endif + lrl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl)))) + return error; + } + + +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) || + !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) { +#else + if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) { +#endif + srl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl)))) + return error; + } + } + return 0; +} + +static int +wl_iw_get_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name)); + + vwrq->disabled = 0; + + + if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) + return -EINVAL; + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) || + (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl)))) + return error; + + lrl = dtoh32(lrl); + srl = dtoh32(srl); + + + if (vwrq->flags & IW_RETRY_MAX) { + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; + vwrq->value = lrl; + } else { + vwrq->flags = IW_RETRY_LIMIT; + vwrq->value = srl; + if (srl != lrl) + vwrq->flags |= IW_RETRY_MIN; + } + + return 0; +} +#endif + +static int +wl_iw_set_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec; + + WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name)); + + memset(&key, 0, sizeof(key)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + + if (key.index == DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + } else { + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + return -EINVAL; + } + + + if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) { + + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + } else { + key.len = dwrq->length; + + if (dwrq->length > sizeof(key.data)) + return -EINVAL; + + memcpy(key.data, extra, dwrq->length); + + key.flags = WL_PRIMARY_KEY; + switch (key.len) { + case WEP1_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WEP128_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP128; + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + case TKIP_KEY_SIZE: + key.algo = CRYPTO_ALGO_TKIP; + break; +#endif + case AES_KEY_SIZE: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + return -EINVAL; + } + + + swap_key_from_BE(&key); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)))) + return error; + } + + + val = (dwrq->flags & 
IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED; + + if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec))) + return error; + + wsec &= ~(WEP_ENABLED); + wsec |= val; + + if ((error = dev_wlc_intvar_set(dev, "wsec", wsec))) + return error; + + + val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0; + val = htod32(val); + if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val)))) + return error; + + return 0; +} + +static int +wl_iw_get_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec, auth; + + WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name)); + + + bzero(&key, sizeof(wl_wsec_key_t)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = key.index; + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + } else + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth)))) + return error; + + swap_key_to_BE(&key); + + wsec = dtoh32(wsec); + auth = dtoh32(auth); + + dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len); + + + dwrq->flags = key.index + 1; + if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) { + + dwrq->flags |= IW_ENCODE_DISABLED; + } + if (auth) { + + dwrq->flags |= IW_ENCODE_RESTRICTED; + } + + + if (dwrq->length && extra) + memcpy(extra, key.data, dwrq->length); + + return 0; +} + +static int +wl_iw_set_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name)); + + pm = vwrq->disabled ? PM_OFF : PM_MAX; + + pm = htod32(pm); + if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) + return error; + + return 0; +} + +static int +wl_iw_get_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)))) + return error; + + pm = dtoh32(pm); + vwrq->disabled = pm ? 
0 : 1; + vwrq->flags = IW_POWER_ALL_R; + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_set_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + uchar buf[WLC_IOCTL_SMLEN] = {0}; + uchar *p = buf; + int wapi_ie_size; + + WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name)); + + CHECK_EXTRA_FOR_NULL(extra); + + if (extra[0] == DOT11_MNG_WAPI_ID) + { + wapi_ie_size = iwp->length; + memcpy(p, extra, iwp->length); + dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size); + } + else + dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length); + + return 0; +} + +static int +wl_iw_get_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name)); + iwp->length = 64; + dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length); + return 0; +} + +static int +wl_iw_set_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error; + struct iw_encode_ext *iwe; + + WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name)); + + CHECK_EXTRA_FOR_NULL(extra); + + memset(&key, 0, sizeof(key)); + iwe = (struct iw_encode_ext *)extra; + + + if (dwrq->flags & IW_ENCODE_DISABLED) { + + } + + + key.index = 0; + if (dwrq->flags & IW_ENCODE_INDEX) + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + key.len = iwe->key_len; + + + if (!ETHER_ISMULTI(iwe->addr.sa_data)) + bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN); + + + if (key.len == 0) { + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("Changing the the primary Key to %d\n", key.index)); + + key.index = htod32(key.index); + error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, + &key.index, sizeof(key.index)); + if (error) + return error; + } + + else { + swap_key_from_BE(&key); + dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + } + } + else { + if (iwe->key_len > sizeof(key.data)) + return -EINVAL; + + WL_WSEC(("Setting the key index %d\n", key.index)); + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("key is a Primary Key\n")); + key.flags = WL_PRIMARY_KEY; + } + + bcopy((void *)iwe->key, key.data, iwe->key_len); + + if (iwe->alg == IW_ENCODE_ALG_TKIP) { + uint8 keybuf[8]; + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + + + if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { + uchar *ivptr; + ivptr = (uchar *)iwe->rx_seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = TRUE; + } + + switch (iwe->alg) { + case IW_ENCODE_ALG_NONE: + key.algo = CRYPTO_ALGO_OFF; + break; + case IW_ENCODE_ALG_WEP: + if (iwe->key_len == WEP1_KEY_SIZE) + key.algo = CRYPTO_ALGO_WEP1; + else + key.algo = CRYPTO_ALGO_WEP128; + break; + case IW_ENCODE_ALG_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + break; + case IW_ENCODE_ALG_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + case IW_ENCODE_ALG_SM4: + key.algo = CRYPTO_ALGO_SMS4; + if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { + key.flags &= ~WL_PRIMARY_KEY; + } + break; + default: + break; + } + swap_key_from_BE(&key); + + dhd_wait_pend8021x(dev); + + error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + if (error) + return error; + } + return 0; +} + +#if WIRELESS_EXT > 17 +#ifdef BCMWPA2 +struct { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID-1]; +} pmkid_list; 
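+
+/*
+ * Local PMKSA cache used by the SIOCSIWPMKSA handler below: up to
+ * MAXPMKID (BSSID, PMKID) pairs are kept in pmkid_list and the whole
+ * list is written to the firmware through the "pmkid_info" iovar after
+ * every successful change.
+ *   IW_PMKSA_FLUSH  - clears the entire cache
+ *   IW_PMKSA_REMOVE - drops the entry matching the supplied BSSID and
+ *                     compacts the list
+ *   IW_PMKSA_ADD    - adds a new entry, or refreshes the PMKID of an
+ *                     already cached BSSID
+ */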
+ +static int +wl_iw_set_pmksa( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + struct iw_pmksa *iwpmksa; + uint i; + int ret = 0; + char eabuf[ETHER_ADDR_STR_LEN]; + + WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name)); + CHECK_EXTRA_FOR_NULL(extra); + + iwpmksa = (struct iw_pmksa *)extra; + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + + if (iwpmksa->cmd == IW_PMKSA_FLUSH) { + WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); + bzero((char *)&pmkid_list, sizeof(pmkid_list)); + } + + else if (iwpmksa->cmd == IW_PMKSA_REMOVE) { + { + pmkid_list_t pmkid, *pmkidptr; + uint j; + pmkidptr = &pmkid; + + bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); + + WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", + bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j])); + WL_WSEC(("\n")); + } + + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + + if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) { + bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t)); + for (; i < (pmkid_list.pmkids.npmkid - 1); i++) { + bcopy(&pmkid_list.pmkids.pmkid[i+1].BSSID, + &pmkid_list.pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&pmkid_list.pmkids.pmkid[i+1].PMKID, + &pmkid_list.pmkids.pmkid[i].PMKID, + WPA2_PMKID_LEN); + } + pmkid_list.pmkids.npmkid--; + } + else + ret = -EINVAL; + } + + else if (iwpmksa->cmd == IW_PMKSA_ADD) { + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + if (i < MAXPMKID) { + bcopy(&iwpmksa->bssid.sa_data[0], + &pmkid_list.pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkid_list.pmkids.pmkid[i].PMKID, + WPA2_PMKID_LEN); + if (i == pmkid_list.pmkids.npmkid) + pmkid_list.pmkids.npmkid++; + } + else + ret = -EINVAL; + + { + uint j; + uint k; + k = pmkid_list.pmkids.npmkid; + WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ", + bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[k].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[k].PMKID[j])); + WL_WSEC(("\n")); + } + } + WL_WSEC(("PRINTING pmkid LIST - No of elements %d, ret = %d\n", pmkid_list.pmkids.npmkid, ret)); + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) { + uint j; + WL_WSEC(("PMKID[%d]: %s = ", i, + bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[i].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j])); + WL_WSEC(("\n")); + } + WL_WSEC(("\n")); + + if (!ret) + ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list)); + return ret; +} +#endif +#endif + +static int +wl_iw_get_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); + return 0; +} + +static int +wl_iw_set_wpaauth( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error = 0; + int paramid; + int paramval; + int val = 0; + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + + WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s: Not executed, reason 
-'SOFTAP is active'\n", __FUNCTION__)); + return 0; + } +#endif + + paramid = vwrq->flags & IW_AUTH_INDEX; + paramval = vwrq->value; + + WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", + dev->name, paramid, paramval)); + + switch (paramid) { + case IW_AUTH_WPA_VERSION: + + if (paramval & IW_AUTH_WPA_VERSION_DISABLED) + val = WPA_AUTH_DISABLED; + else if (paramval & (IW_AUTH_WPA_VERSION_WPA)) + val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; +#ifdef BCMWPA2 + else if (paramval & IW_AUTH_WPA_VERSION_WPA2) + val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; +#endif + else if (paramval & IW_AUTH_WAPI_VERSION_1) + val = WPA_AUTH_WAPI; + WL_INFORM(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + return error; + break; + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + + + if (paramval & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) + val = WEP_ENABLED; + if (paramval & IW_AUTH_CIPHER_TKIP) + val = TKIP_ENABLED; + if (paramval & IW_AUTH_CIPHER_CCMP) + val = AES_ENABLED; + if (paramval & IW_AUTH_CIPHER_SMS4) + val = SMS4_ENABLED; + + if (paramid == IW_AUTH_CIPHER_PAIRWISE) { + iw->pwsec = val; + val |= iw->gwsec; + } + else { + iw->gwsec = val; + val |= iw->pwsec; + } + + if (iw->privacy_invoked && !val) { + WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming " + "we're a WPS enrollee\n", dev->name, __FUNCTION__)); + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { + WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); + return error; + } + } else if (val) { + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } + + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) + return error; + + break; + + case IW_AUTH_KEY_MGMT: + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + if (paramval & IW_AUTH_KEY_MGMT_PSK) + val = WPA_AUTH_PSK; + else + val = WPA_AUTH_UNSPECIFIED; + } +#ifdef BCMWPA2 + else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { + if (paramval & IW_AUTH_KEY_MGMT_PSK) + val = WPA2_AUTH_PSK; + else + val = WPA2_AUTH_UNSPECIFIED; + } +#endif + if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT)) + val = WPA_AUTH_WAPI; + WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + return error; + + break; + case IW_AUTH_TKIP_COUNTERMEASURES: + dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)¶mval, 1); + break; + + case IW_AUTH_80211_AUTH_ALG: + + WL_INFORM(("Setting the D11auth %d\n", paramval)); + if (paramval == IW_AUTH_ALG_OPEN_SYSTEM) + val = 0; + else if (paramval == IW_AUTH_ALG_SHARED_KEY) + val = 1; + else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY)) + val = 2; + else + error = 1; + if (!error && (error = dev_wlc_intvar_set(dev, "auth", val))) + return error; + break; + + case IW_AUTH_WPA_ENABLED: + if (paramval == 0) { + iw->pwsec = 0; + iw->gwsec = 0; + if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) + return error; + if (val & (TKIP_ENABLED | AES_ENABLED)) { + val &= ~(TKIP_ENABLED | AES_ENABLED); + dev_wlc_intvar_set(dev, "wsec", val); + } + val = 0; + WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); + dev_wlc_intvar_set(dev, "wpa_auth", 0); + return error; + } + + + break; + + case IW_AUTH_DROP_UNENCRYPTED: + 
dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)¶mval, 1); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + break; + +#if WIRELESS_EXT > 17 + case IW_AUTH_ROAMING_CONTROL: + WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); + + break; + case IW_AUTH_PRIVACY_INVOKED: { + int wsec; + + if (paramval == 0) { + iw->privacy_invoked = FALSE; + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } else { + iw->privacy_invoked = TRUE; + if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec))) + return error; + + if (!(IW_WSEC_ENABLED(wsec))) { + + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { + WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); + return error; + } + } else { + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } + } + break; + } +#endif + case IW_AUTH_WAPI_ENABLED: + if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) + return error; + if (paramval) { + val |= SMS4_ENABLED; + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) { + WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n", + __FUNCTION__, val, error)); + return error; + } + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WPA_AUTH_WAPI))) { + WL_ERROR(("%s: setting wpa_auth(WPA_AUTH_WAPI) returned %d\n", + __FUNCTION__, error)); + return error; + } + } + + break; + default: + break; + } + return 0; +} +#ifdef BCMWPA2 +#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK)) +#else +#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK)) +#endif + +static int +wl_iw_get_wpaauth( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error; + int paramid; + int paramval = 0; + int val; + wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev); + + WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name)); + + paramid = vwrq->flags & IW_AUTH_INDEX; + + switch (paramid) { + case IW_AUTH_WPA_VERSION: + + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED)) + paramval = IW_AUTH_WPA_VERSION_DISABLED; + else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) + paramval = IW_AUTH_WPA_VERSION_WPA; +#ifdef BCMWPA2 + else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) + paramval = IW_AUTH_WPA_VERSION_WPA2; +#endif + break; + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + if (paramid == IW_AUTH_CIPHER_PAIRWISE) + val = iw->pwsec; + else + val = iw->gwsec; + + paramval = 0; + if (val) { + if (val & WEP_ENABLED) + paramval |= (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104); + if (val & TKIP_ENABLED) + paramval |= (IW_AUTH_CIPHER_TKIP); + if (val & AES_ENABLED) + paramval |= (IW_AUTH_CIPHER_CCMP); + } + else + paramval = IW_AUTH_CIPHER_NONE; + break; + case IW_AUTH_KEY_MGMT: + + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (VAL_PSK(val)) + paramval = IW_AUTH_KEY_MGMT_PSK; + else + paramval = IW_AUTH_KEY_MGMT_802_1X; + + break; + case IW_AUTH_TKIP_COUNTERMEASURES: + dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)¶mval, 1); + break; + + case IW_AUTH_DROP_UNENCRYPTED: + dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)¶mval, 1); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + break; + + case IW_AUTH_80211_AUTH_ALG: + + if 
((error = dev_wlc_intvar_get(dev, "auth", &val))) + return error; + if (!val) + paramval = IW_AUTH_ALG_OPEN_SYSTEM; + else + paramval = IW_AUTH_ALG_SHARED_KEY; + break; + case IW_AUTH_WPA_ENABLED: + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (val) + paramval = TRUE; + else + paramval = FALSE; + break; +#if WIRELESS_EXT > 17 + case IW_AUTH_ROAMING_CONTROL: + WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); + + break; + case IW_AUTH_PRIVACY_INVOKED: + paramval = iw->privacy_invoked; + break; +#endif + } + vwrq->value = paramval; + return 0; +} +#endif + + +#ifdef SOFTAP + +static int ap_macmode = MACLIST_MODE_DISABLED; +static struct mflist ap_black_list; +static int +wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key) +{ + char hex[] = "XX"; + unsigned char *data = key->data; + + switch (strlen(keystr)) { + case 5: + case 13: + case 16: + key->len = strlen(keystr); + memcpy(data, keystr, key->len + 1); + break; + case 12: + case 28: + case 34: + case 66: + if (!strnicmp(keystr, "0x", 2)) + keystr += 2; + else + return -1; + case 10: + case 26: + case 32: + case 64: + key->len = strlen(keystr) / 2; + while (*keystr) { + strncpy(hex, keystr, 2); + *data++ = (char) bcm_strtoul(hex, NULL, 16); + keystr += 2; + } + break; + default: + return -1; + } + + switch (key->len) { + case 5: + key->algo = CRYPTO_ALGO_WEP1; + break; + case 13: + key->algo = CRYPTO_ALGO_WEP128; + break; + case 16: + key->algo = CRYPTO_ALGO_AES_CCM; + break; + case 32: + key->algo = CRYPTO_ALGO_TKIP; + break; + default: + return -1; + } + + key->flags |= WL_PRIMARY_KEY; + + return 0; +} + +#ifdef EXT_WPA_CRYPTO +#define SHA1HashSize 20 +extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len, + int iterations, u8 *buf, size_t buflen); + +#else + +#define SHA1HashSize 20 +int pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len, + int iterations, u8 *buf, size_t buflen) +{ + WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__)); + return -1; +} + +#endif + + +int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val) +{ + struct { + int cfg; + int val; + } bss_setbuf; + + int bss_set_res; + char smbuf[WLC_IOCTL_SMLEN]; + memset(smbuf, 0, sizeof(smbuf)); + + bss_setbuf.cfg = 1; + bss_setbuf.val = val; + + bss_set_res = dev_iw_iovar_setbuf(dev, "bss", + &bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf)); + WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val)); + + return bss_set_res; +} + + +int dev_iw_read_cfg1_bss_var(struct net_device *dev, int *val) +{ + int bsscfg_idx = 1; + int bss_set_res; + char smbuf[WLC_IOCTL_SMLEN]; + memset(smbuf, 0, sizeof(smbuf)); + + bss_set_res = dev_iw_iovar_getbuf(dev, "bss", \ + &bsscfg_idx, sizeof(bsscfg_idx), smbuf, sizeof(smbuf)); + *val = *(int*)smbuf; + *val = dtoh32(*val); + WL_TRACE(("%s: status=%d bss_get_result=%d\n", __FUNCTION__, bss_set_res, *val)); + return bss_set_res; +} + + +#ifndef AP_ONLY +static int wl_bssiovar_mkbuf( + const char *iovar, + int bssidx, + void *param, + int paramlen, + void *bufptr, + int buflen, + int *perr) +{ + const char *prefix = "bsscfg:"; + int8 *p; + uint prefixlen; + uint namelen; + uint iolen; + + prefixlen = strlen(prefix); + namelen = strlen(iovar) + 1; + iolen = prefixlen + namelen + sizeof(int) + paramlen; + + if (buflen < 0 || iolen > (uint)buflen) { + *perr = BCME_BUFTOOSHORT; + return 0; + } + + p = (int8 *)bufptr; + + memcpy(p, prefix, prefixlen); + p += prefixlen; + + memcpy(p, iovar, namelen); + p += 
namelen; + + bssidx = htod32(bssidx); + memcpy(p, &bssidx, sizeof(int32)); + p += sizeof(int32); + + if (paramlen) + memcpy(p, param, paramlen); + + *perr = 0; + return iolen; +} +#endif + + +int get_user_params(char *user_params, struct iw_point *dwrq) +{ + int ret = 0; + + if (copy_from_user(user_params, dwrq->pointer, dwrq->length)) { + WL_ERROR(("\n%s: no user params: uptr:%p, ulen:%d\n", + __FUNCTION__, dwrq->pointer, dwrq->length)); + return -EFAULT; + } + + WL_TRACE(("\n%s: iwpriv user params:%s\n", __FUNCTION__, user_params)); + + return ret; +} + + +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) + +#if defined(CSCAN) + +static int +wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan) +{ + int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16); + int err = 0; + char *p; + int i; + iscan_info_t *iscan = g_iscan; + + WL_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan)); + + if ((!dev) && (!g_iscan) && (!iscan->iscan_ex_params_p)) { + WL_ERROR(("%s error exit\n", __FUNCTION__)); + err = -1; + goto exit; + } + +#ifdef PNO_SUPPORT + if (dhd_dev_get_pno_status(dev)) { + WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__)); + } +#endif + + params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t); + + if (nssid > 0) { + i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16); + i = ROUNDUP(i, sizeof(uint32)); + if (i + nssid * sizeof(wlc_ssid_t) > params_size) { + printf("additional ssids exceed params_size\n"); + err = -1; + goto exit; + } + + p = ((char*)&iscan->iscan_ex_params_p->params) + i; + memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t)); + p += nssid * sizeof(wlc_ssid_t); + } else { + p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16); + } + + + iscan->iscan_ex_params_p->params.channel_num = \ + htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) | \ + (nchan & WL_SCAN_PARAMS_COUNT_MASK)); + + nssid = \ + (uint)((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) & \ + WL_SCAN_PARAMS_COUNT_MASK); + + + params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t)); + iscan->iscan_ex_param_size = params_size; + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + wl_iw_set_event_mask(dev); + mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000); + + iscan->timer_on = 1; + +#ifdef SCAN_DUMP + { + int i; + WL_SCAN(("\n### List of SSIDs to scan ###\n")); + for (i = 0; i < nssid; i++) { + if (!ssids_local[i].SSID_len) + WL_SCAN(("%d: Broadcast scan\n", i)); + else + WL_SCAN(("%d: scan for %s size =%d\n", i, \ + ssids_local[i].SSID, ssids_local[i].SSID_len)); + } + WL_SCAN(("### List of channels to scan ###\n")); + for (i = 0; i < nchan; i++) + { + WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i])); + } + WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes)); + WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time)); + WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time)); + WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time)); + WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type)); + WL_SCAN(("\n###################\n")); + } +#endif + + if (params_size > WLC_IOCTL_MEDLEN) { + WL_ERROR(("Set ISCAN for %s due to params_size=%d \n", \ + __FUNCTION__, params_size)); + err = -1; + } + + if ((err = dev_iw_iovar_setbuf(dev, "iscan", 
iscan->iscan_ex_params_p, \ + iscan->iscan_ex_param_size, \ + iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) { + WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err)); + err = -1; + } + +exit: + + return err; +} + + +static int iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info, \ + union iwreq_data *wrqu, char *ext) +{ + int res = 0; + char *extra = NULL; + iscan_info_t *iscan = g_iscan; + wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX]; + int nssid = 0; + int nchan = 0; + + WL_TRACE(("\%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + return -1; + } + + if (wrqu->data.length != 0) { + + char *str_ptr; + + if (!iscan->iscan_ex_params_p) { + return -EFAULT; + } + + if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + kfree(extra); + return -EFAULT; + } + + extra[wrqu->data.length] = 0; + WL_ERROR(("Got str param in iw_point:\n %s\n", extra)); + + str_ptr = extra; + + if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) { + WL_ERROR(("%s Error: extracting SSID='' string\n", __FUNCTION__)); + goto exit_proc; + } + str_ptr += strlen(GET_SSID); + nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid, \ + WL_SCAN_PARAMS_SSID_MAX); + if (nssid == -1) { + WL_ERROR(("%s wrong ssid list", __FUNCTION__)); + return -1; + } + + memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size); + ASSERT(iscan->iscan_ex_param_size < WLC_IOCTL_MAXLEN); + + + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL); + iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION); + iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START); + iscan->iscan_ex_params_p->scan_duration = htod16(0); + + + if ((nchan = wl_iw_parse_channel_list(&str_ptr, \ + &iscan->iscan_ex_params_p->params.channel_list[0], \ + WL_NUMCHANNELS)) == -1) { + WL_ERROR(("%s missing channel list\n", __FUNCTION__)); + return -1; + } + + + get_parmeter_from_string(&str_ptr, \ + GET_NPROBE, PTYPE_INTDEC, \ + &iscan->iscan_ex_params_p->params.nprobes, 2); + + get_parmeter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC, \ + &iscan->iscan_ex_params_p->params.active_time, 4); + + get_parmeter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC, \ + &iscan->iscan_ex_params_p->params.passive_time, 4); + + get_parmeter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC, \ + &iscan->iscan_ex_params_p->params.home_time, 4); + + get_parmeter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC, \ + &iscan->iscan_ex_params_p->params.scan_type, 1); + + res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan); + + } else { + WL_ERROR(("IWPRIV argument len = 0 \n")); + return -1; + } + +exit_proc: + + kfree(extra); + + return res; +} + + +static int +wl_iw_set_cscan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int res = -1; + iscan_info_t *iscan = g_iscan; + wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX]; + int nssid = 0; + int nchan = 0; + cscan_tlv_t *cscan_tlv_temp; + char type; + char *str_ptr; + int tlv_size_left; +#ifdef TLV_DEBUG + int i; + char tlv_in_example[] = { 'C', 'S', 'C', 'A', 'N', ' ', \ + 0x53, 0x01, 0x00, 0x00, + 'S', + 0x00, + 'S', + 0x04, + 'B', 'R', 'C', 'M', + 'C', + 0x06, + 'P', + 0x94, + 0x11, + 'T', + 0x01 + }; 
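+
+	/*
+	 * tlv_in_example above is a canned CSCAN request used only when
+	 * TLV_DEBUG is defined: wl_iw_set_cscan() overwrites the user buffer
+	 * with this array and dumps it in hex before parsing, so the TLV
+	 * parser can be exercised without a userspace sender.  The leading
+	 * bytes carry the cscan_tlv_t header (prefix/version/subversion) that
+	 * the parser checks; the remaining entries appear to encode a
+	 * broadcast SSID, an SSID of "BRCM", a channel and further scan
+	 * parameters.
+	 */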
+#endif + + WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto exit_proc; + } + + + if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) { + WL_ERROR(("%s aggument=%d less %d\n", __FUNCTION__, \ + wrqu->data.length, strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))); + goto exit_proc; + } + +#ifdef TLV_DEBUG + memcpy(extra, tlv_in_example, sizeof(tlv_in_example)); + wrqu->data.length = sizeof(tlv_in_example); + for (i = 0; i < wrqu->data.length; i++) + printf("%02X ", extra[i]); + printf("\n"); +#endif + + str_ptr = extra; + str_ptr += strlen(CSCAN_COMMAND); + tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND); + + cscan_tlv_temp = (cscan_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) && \ + (cscan_tlv_temp->version == CSCAN_TLV_VERSION) && \ + (cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION)) + { + str_ptr += sizeof(cscan_tlv_t); + tlv_size_left -= sizeof(cscan_tlv_t); + + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \ + WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 0) { + WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } + else { + + memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size); + + + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL); + iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION); + iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START); + iscan->iscan_ex_params_p->scan_duration = htod16(0); + + + while (tlv_size_left > 0) + { + type = str_ptr[0]; + switch (type) { + case CSCAN_TLV_TYPE_CHANNEL_IE: + + if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.channel_list[0], \ + WL_NUMCHANNELS, &tlv_size_left)) == -1) { + WL_ERROR(("%s missing channel list\n", \ + __FUNCTION__)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_NPROBE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.nprobes, \ + sizeof(iscan->iscan_ex_params_p->params.nprobes), \ + type, sizeof(char), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_ACTIVE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.active_time, \ + sizeof(iscan->iscan_ex_params_p->params.active_time), \ + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_PASSIVE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.passive_time, \ + sizeof(iscan->iscan_ex_params_p->params.passive_time), \ + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_HOME_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.home_time, \ + sizeof(iscan->iscan_ex_params_p->params.home_time), \ + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_STYPE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, \ + &iscan->iscan_ex_params_p->params.scan_type, \ + 
sizeof(iscan->iscan_ex_params_p->params.scan_type), \ + type, sizeof(char), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", \ + __FUNCTION__, res)); + goto exit_proc; + } + break; + + default : + WL_ERROR(("%s get unkwown type %X\n", \ + __FUNCTION__, type)); + goto exit_proc; + break; + } + } + } + } + else { + WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { + + WL_ERROR(("%s Clean up First scan flag which is %d\n", \ + __FUNCTION__, g_first_broadcast_scan)); + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; + } + else { + WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n", \ + __FUNCTION__, g_first_counter_scans)); + res = -EBUSY; + goto exit_proc; + } + } + + res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan); + +exit_proc: + net_os_wake_unlock(dev); + return res; +} + +#endif + +#ifdef SOFTAP +#ifndef AP_ONLY + +static int thr_wait_for_2nd_eth_dev(void *data) +{ + int ret = 0; + + DAEMONIZE("wl0_eth_wthread"); + + WL_TRACE(("\n>%s threda started:, PID:%x\n", __FUNCTION__, current->pid)); + + if (down_timeout(&ap_eth_sema, msecs_to_jiffies(5000)) != 0) { + WL_ERROR(("\n%s: sap_eth_sema timeout \n", __FUNCTION__)); + ret = -1; + goto fail; + } + + if (!ap_net_dev) { + WL_ERROR((" ap_net_dev is null !!!")); + ret = -1; + goto fail; + } + + WL_TRACE(("\n>%s: Thread:'softap ethdev IF:%s is detected !!!'\n\n", + __FUNCTION__, ap_net_dev->name)); + + ap_cfg_running = TRUE; + + bcm_mdelay(500); + + wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK"); + +fail: + WL_TRACE(("\n>%s, thread completed\n", __FUNCTION__)); + + return ret; +} +#endif +#ifndef AP_ONLY +static int last_auto_channel = 6; +#endif +static int get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap) +{ + int chosen = 0; + wl_uint32_list_t request; + int rescan = 0; + int retry = 0; + int updown = 0; + int ret = 0; + wlc_ssid_t null_ssid; + int res = 0; +#ifndef AP_ONLY + int iolen = 0; + int mkvar_err = 0; + int bsscfg_index = 1; + char buf[WLC_IOCTL_SMLEN]; +#endif + WL_SOFTAP(("Enter %s\n", __FUNCTION__)); + +#ifndef AP_ONLY + if (ap_cfg_running) { + ap->channel = last_auto_channel; + return res; + } +#endif + memset(&null_ssid, 0, sizeof(wlc_ssid_t)); + res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)); +#ifdef AP_ONLY + res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid)); +#else + iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid), \ + null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen); +#endif + auto_channel_retry: + request.count = htod32(0); + ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request)); + if (ret < 0) { + WL_ERROR(("can't start auto channel scan\n")); + goto fail; + } + + get_channel_retry: + bcm_mdelay(500); + + ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen)); + if (ret < 0 || dtoh32(chosen) == 0) { + if (retry++ < 3) + goto get_channel_retry; + else { + WL_ERROR(("can't get auto channel sel, err = %d, \ + chosen = %d\n", ret, chosen)); + goto fail; + } + } + if ((chosen == 1) && (!rescan++)) + goto auto_channel_retry; + WL_SOFTAP(("Set auto channel = %d\n", chosen)); + ap->channel = chosen; + if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) { + WL_ERROR(("%s fail to set up err =%d\n", 
__FUNCTION__, res)); + goto fail; + } +#ifndef AP_ONLY + if (!res) + last_auto_channel = ap->channel; +#endif + +fail : + return res; +} + + +static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap) +{ + int updown = 0; + int channel = 0; + + wlc_ssid_t ap_ssid; + int max_assoc = 8; + + int res = 0; + int apsta_var = 0; +#ifndef AP_ONLY + int mpc = 0; + int iolen = 0; + int mkvar_err = 0; + int bsscfg_index = 1; + char buf[WLC_IOCTL_SMLEN]; +#endif + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + net_os_wake_lock(dev); + + WL_SOFTAP(("wl_iw: set ap profile:\n")); + WL_SOFTAP((" ssid = '%s'\n", ap->ssid)); + WL_SOFTAP((" security = '%s'\n", ap->sec)); + if (ap->key[0] != '\0') + WL_SOFTAP((" key = '%s'\n", ap->key)); + WL_SOFTAP((" channel = %d\n", ap->channel)); + WL_SOFTAP((" max scb = %d\n", ap->max_scb)); + +#ifdef AP_ONLY + if (ap_cfg_running) { + wl_iw_softap_deassoc_stations(dev); + ap_cfg_running = FALSE; + } +#endif + + if (ap_cfg_running == FALSE) { + +#ifndef AP_ONLY + + sema_init(&ap_eth_sema, 0); + + mpc = 0; + if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) { + WL_ERROR(("%s fail to set mpc\n", __FUNCTION__)); + goto fail; + } +#endif + + updown = 0; + if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) { + WL_ERROR(("%s fail to set updown\n", __FUNCTION__)); + goto fail; + } + +#ifdef AP_ONLY + apsta_var = 0; + if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) { + WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__)); + goto fail; + } + apsta_var = 1; + if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) { + WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__)); + goto fail; + } + res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var)); +#else + apsta_var = 1; + iolen = wl_bssiovar_mkbuf("apsta", + bsscfg_index, &apsta_var, sizeof(apsta_var)+4, + buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) { + WL_ERROR(("%s fail to set apsta \n", __FUNCTION__)); + goto fail; + } + WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res)); +#endif + + updown = 1; + if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) { + WL_ERROR(("%s fail to set apsta \n", __FUNCTION__)); + goto fail; + } + + } else { + + if (!ap_net_dev) { + WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__)); + goto fail; + } + + res = wl_iw_softap_deassoc_stations(ap_net_dev); + + + if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) { + WL_ERROR(("%s fail to set bss down\n", __FUNCTION__)); + goto fail; + } + } + + + if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) { + ap->channel = 1; + WL_ERROR(("%s auto channel failed, pick up channel=%d\n", \ + __FUNCTION__, ap->channel)); + } + + channel = ap->channel; + if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) { + WL_ERROR(("%s fail to set channel\n", __FUNCTION__)); + goto fail; + } + + if (ap_cfg_running == FALSE) { + updown = 0; + if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) { + WL_ERROR(("%s fail to set up\n", __FUNCTION__)); + goto fail; + } + } + + max_assoc = ap->max_scb; + if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) { + WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__)); + goto fail; + } + + ap_ssid.SSID_len = strlen(ap->ssid); + strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len); + +#ifdef AP_ONLY + if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) { + 
WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n", \ + res, __FUNCTION__)); + goto fail; + } + wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START"); + ap_cfg_running = TRUE; +#else + iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid), + ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) { + WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n", \ + res, __FUNCTION__)); + goto fail; + } + if (ap_cfg_running == FALSE) { + kernel_thread(thr_wait_for_2nd_eth_dev, 0, 0); + } else { + if (ap_net_dev == NULL) { + WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__)); + goto fail; + } + + WL_ERROR(("%s: %s Configure security & restart AP bss \n", \ + __FUNCTION__, ap_net_dev->name)); + + if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) { + WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res)); + goto fail; + } + + if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) { + WL_ERROR(("%s fail to set bss up\n", __FUNCTION__)); + goto fail; + } + } +#endif +fail: + WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res)); + + net_os_wake_unlock(dev); + + return res; +} + + +static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap) +{ + int wsec = 0; + int wpa_auth = 0; + int res = 0; + int i; + char *ptr; +#ifdef AP_ONLY + int mpc = 0; + wlc_ssid_t ap_ssid; +#endif + wl_wsec_key_t key; + + WL_SOFTAP(("\nsetting SOFTAP security mode:\n")); + WL_SOFTAP(("wl_iw: set ap profile:\n")); + WL_SOFTAP((" ssid = '%s'\n", ap->ssid)); + WL_SOFTAP((" security = '%s'\n", ap->sec)); + if (ap->key[0] != '\0') + WL_SOFTAP((" key = '%s'\n", ap->key)); + WL_SOFTAP((" channel = %d\n", ap->channel)); + WL_SOFTAP((" max scb = %d\n", ap->max_scb)); + + if (strnicmp(ap->sec, "open", strlen("open")) == 0) { + wsec = 0; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + wpa_auth = WPA_AUTH_DISABLED; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP(("=====================\n")); + WL_SOFTAP((" wsec & wpa_auth set 'OPEN', result:&d %d\n", res)); + WL_SOFTAP(("=====================\n")); + + } else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) { + + memset(&key, 0, sizeof(key)); + + wsec = WEP_ENABLED; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + + key.index = 0; + if (wl_iw_parse_wep(ap->key, &key)) { + WL_SOFTAP(("wep key parse err!\n")); + return -1; + } + + key.index = htod32(key.index); + key.len = htod32(key.len); + key.algo = htod32(key.algo); + key.flags = htod32(key.flags); + + res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + + wpa_auth = WPA_AUTH_DISABLED; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP(("=====================\n")); + WL_SOFTAP((" wsec & auth set 'WEP', result:&d %d\n", res)); + WL_SOFTAP(("=====================\n")); + + } else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) { + wsec_pmk_t psk; + size_t key_len; + + wsec = AES_ENABLED; + dev_wlc_intvar_set(dev, "wsec", wsec); + + key_len = strlen(ap->key); + if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) { + WL_SOFTAP(("passphrase must be between %d and %d characters long\n", + WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN)); + return -1; + } + + if (key_len < WSEC_MAX_PSK_LEN) { + unsigned char output[2*SHA1HashSize]; + char key_str_buf[WSEC_MAX_PSK_LEN+1]; + + memset(output, 0, sizeof(output)); + pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32); + + ptr = key_str_buf; + for (i = 0; i < 
(WSEC_MAX_PSK_LEN/8); i++) { + sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], \ + (uint)output[i*4+1], (uint)output[i*4+2], \ + (uint)output[i*4+3]); + ptr += 8; + } + WL_SOFTAP(("%s: passphase = %s\n", __FUNCTION__, key_str_buf)); + + psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN); + memcpy(psk.key, key_str_buf, psk.key_len); + } else { + psk.key_len = htod16((ushort) key_len); + memcpy(psk.key, ap->key, key_len); + } + psk.flags = htod16(WSEC_PASSPHRASE); + dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk)); + + wpa_auth = WPA2_AUTH_PSK; + dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + } else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) { + + wsec_pmk_t psk; + size_t key_len; + + wsec = TKIP_ENABLED; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + + key_len = strlen(ap->key); + if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) { + WL_SOFTAP(("passphrase must be between %d and %d characters long\n", + WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN)); + return -1; + } + + if (key_len < WSEC_MAX_PSK_LEN) { + unsigned char output[2*SHA1HashSize]; + char key_str_buf[WSEC_MAX_PSK_LEN+1]; + + WL_SOFTAP(("%s: do passhash...\n", __FUNCTION__)); + + pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32); + + ptr = key_str_buf; + for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) { + WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int *)&output[i*4]))); + + sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], + (uint)output[i*4+1], (uint)output[i*4+2], + (uint)output[i*4+3]); + ptr += 8; + } + WL_SOFTAP(("%s: passphase = %s\n", __FUNCTION__, key_str_buf)); + + psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN); + memcpy(psk.key, key_str_buf, psk.key_len); + } else { + psk.key_len = htod16((ushort) key_len); + memcpy(psk.key, ap->key, key_len); + } + + psk.flags = htod16(WSEC_PASSPHRASE); + res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk)); + + wpa_auth = WPA_AUTH_PSK; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result:&d %d\n", res)); + } + +#ifdef AP_ONLY + ap_ssid.SSID_len = strlen(ap->ssid); + strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len); + res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid)); + mpc = 0; + res |= dev_wlc_intvar_set(dev, "mpc", mpc); + if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) { + res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + } +#endif + return res; +} + + + +int get_parmeter_from_string( + char **str_ptr, const char *token, + int param_type, void *dst, int param_max_len) +{ + char int_str[7] = "0"; + int parm_str_len; + char *param_str_begin; + char *param_str_end; + char *orig_str = *str_ptr; + + if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) { + + strsep(str_ptr, "=,"); + param_str_begin = *str_ptr; + strsep(str_ptr, "=,"); + + if (*str_ptr == NULL) { + parm_str_len = strlen(param_str_begin); + } else { + param_str_end = *str_ptr-1; + parm_str_len = param_str_end - param_str_begin; + } + + WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len)); + + if (parm_str_len > param_max_len) { + WL_TRACE((" WARNING: extracted param len:%d is > MAX:%d\n", + parm_str_len, param_max_len)); + + parm_str_len = param_max_len; + } + + switch (param_type) { + + case PTYPE_INTDEC: { + int *pdst_int = dst; + char *eptr; + + if (parm_str_len > sizeof(int_str)) + parm_str_len = sizeof(int_str); + + memcpy(int_str, param_str_begin, parm_str_len); + + *pdst_int = simple_strtoul(int_str, &eptr, 10); + + WL_TRACE((" written as integer:%d\n", 
*pdst_int)); + } + break; + case PTYPE_STR_HEX: { + u8 *buf = dst; + + param_max_len = param_max_len >> 1; + hstr_2_buf(param_str_begin, buf, param_max_len); + print_buf(buf, param_max_len, 0); + } + break; + default: + memcpy(dst, param_str_begin, parm_str_len); + *((char *)dst + parm_str_len) = 0; + WL_TRACE((" written as a string:%s\n", (char *)dst)); + break; + } + + return 0; + } else { + WL_ERROR(("\n %s: ERROR: can't find token:%s in str:%s \n", + __FUNCTION__, token, orig_str)); + + return -1; + } +} + + +static int wl_iw_softap_deassoc_stations(struct net_device *dev) +{ + int i; + int res = 0; + char mac_buf[128] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + memset(assoc_maclist, 0, sizeof(mac_buf)); + assoc_maclist->count = 8; + + res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128); + if (res != 0) { + WL_SOFTAP((" Error:%d in :%s, Couldn't get ASSOC List\n", res, __FUNCTION__)); + return res; + } + + if (assoc_maclist->count) { + for (i = 0; i < assoc_maclist->count; i++) { + scb_val_t scbval; + + scbval.val = htod32(1); + bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN); + + WL_SOFTAP(("deauth STA:%d \n", i)); + res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t)); + } + } else { + WL_SOFTAP((" STA ASSOC list is empty\n")); + } + + if (res != 0) + WL_SOFTAP((" Error:%d in :%s\n", res, __FUNCTION__)); + else if (assoc_maclist->count) { + + bcm_mdelay(200); + } + return res; +} + + +static int iwpriv_softap_stop(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + + WL_SOFTAP(("got iwpriv AP_BSS_STOP\n")); + + if ((!dev) && (!ap_net_dev)) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return res; + } + + net_os_wake_lock(dev); + + if ((ap_cfg_running == TRUE)) { +#ifdef AP_ONLY + wl_iw_softap_deassoc_stations(dev); +#else + wl_iw_softap_deassoc_stations(ap_net_dev); + + if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0) + WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res)); +#endif + + bcm_mdelay(100); + + wrqu->data.length = 0; + ap_cfg_running = FALSE; + } + else + WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__)); + + WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res)); + + net_os_wake_unlock(dev); + + return res; +} + + +static int iwpriv_fw_reload(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int ret = -1; + char extra[256]; + char *fwstr = fw_path; + + WL_SOFTAP(("current firmware_path[]=%s\n", fwstr)); + + WL_TRACE((">Got FW_RELOAD cmd:" + "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, \ + fw_path:%p, len:%d \n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr))); + + if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) { + + char *str_ptr; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + ret = -EFAULT; + goto exit_proc; + } + + extra[wrqu->data.length] = 8; + str_ptr = extra; + + if (get_parmeter_from_string(&str_ptr, "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) { + WL_ERROR(("Error: extracting FW_PATH='' string\n")); + goto exit_proc; + } + + if (strstr(fwstr, "apsta") != NULL) { + WL_SOFTAP(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + WL_SOFTAP(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } + + WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr)); + ret = 0; + } else { + WL_ERROR(("Error: ivalid param len:%d\n", 
wrqu->data.length)); + } + +exit_proc: + return ret; +} +#endif + +#ifdef SOFTAP +static int iwpriv_wpasupp_loop_tst(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + char *params = NULL; + + WL_TRACE((">Got IWPRIV wp_supp loopback cmd test:" + "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) { + kfree(params); + return -EFAULT; + } + + params[wrqu->data.length] = 0; + WL_SOFTAP(("\n>> copied from user:\n %s\n", params)); + } else { + WL_ERROR(("ERROR param length is 0\n")); + return -EFAULT; + } + + res = wl_iw_send_priv_event(dev, params); + kfree(params); + + return res; +} +#endif + + +static int +iwpriv_en_ap_bss( + struct net_device *dev, + struct iw_request_info *info, + void *wrqu, + char *extra) +{ + int res = 0; + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + net_os_wake_lock(dev); + + WL_TRACE(("%s: rcvd IWPRIV IOCTL: for dev:%s\n", __FUNCTION__, dev->name)); + +#ifndef AP_ONLY + if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) { + WL_ERROR((" %s ERROR setting SOFTAP security in :%d\n", __FUNCTION__, res)); + } + else { + if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) + WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res)); + else + bcm_mdelay(100); + } + +#endif + WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res)); + + net_os_wake_unlock(dev); + + return res; +} + +static int +get_assoc_sta_list(struct net_device *dev, char *buf, int len) +{ + WL_TRACE(("calling dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n", + dev, WLC_GET_ASSOCLIST, buf, len)); + + dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len); + + return 0; +} + + +static int +set_ap_mac_list(struct net_device *dev, char *buf) +{ + struct mac_list_set *mac_list_set = (struct mac_list_set *)buf; + struct maclist *white_maclist = (struct maclist *)&mac_list_set->white_list; + struct maclist *black_maclist = (struct maclist *)&mac_list_set->black_list; + int mac_mode = mac_list_set->mode; + int length; + int i; + + ap_macmode = mac_mode; + if (mac_mode == MACLIST_MODE_DISABLED) { + + bzero(&ap_black_list, sizeof(struct mflist)); + + dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode)); + } else { + scb_val_t scbval; + char mac_buf[256] = {0}; + struct maclist *assoc_maclist = (struct maclist *) mac_buf; + + mac_mode = MACLIST_MODE_ALLOW; + + dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode)); + + length = sizeof(white_maclist->count)+white_maclist->count*ETHER_ADDR_LEN; + dev_wlc_ioctl(dev, WLC_SET_MACLIST, white_maclist, length); + WL_SOFTAP(("White List, length %d:\n", length)); + for (i = 0; i < white_maclist->count; i++) + WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n", + i, white_maclist->ea[i].octet[0], white_maclist->ea[i].octet[1], + white_maclist->ea[i].octet[2], + white_maclist->ea[i].octet[3], white_maclist->ea[i].octet[4], + white_maclist->ea[i].octet[5])); + + bcopy(black_maclist, &ap_black_list, sizeof(ap_black_list)); + + WL_SOFTAP(("Black List, size %d:\n", sizeof(ap_black_list))); + for (i = 0; i < ap_black_list.count; i++) + WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n", + i, ap_black_list.ea[i].octet[0], ap_black_list.ea[i].octet[1], + ap_black_list.ea[i].octet[2], + 
ap_black_list.ea[i].octet[3], + ap_black_list.ea[i].octet[4], ap_black_list.ea[i].octet[5])); + + dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256); + if (assoc_maclist->count) { + int j; + for (i = 0; i < assoc_maclist->count; i++) { + for (j = 0; j < white_maclist->count; j++) { + if (!bcmp(&assoc_maclist->ea[i], &white_maclist->ea[j], + ETHER_ADDR_LEN)) { + WL_SOFTAP(("match allow, let it be\n")); + break; + } + } + if (j == white_maclist->count) { + WL_SOFTAP(("match black, deauth it\n")); + scbval.val = htod32(1); + bcopy(&assoc_maclist->ea[i], &scbval.ea, + ETHER_ADDR_LEN); + dev_wlc_ioctl(dev, + WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, + sizeof(scb_val_t)); + } + } + } + } + return 0; +} +#endif + + +#ifdef SOFTAP +int set_macfilt_from_string(struct mflist *pmflist, char **param_str) +{ + return 0; +} +#endif + + +#ifdef SOFTAP +#define PARAM_OFFSET PROFILE_OFFSET + +int wl_iw_process_private_ascii_cmd( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *dwrq, + char *cmd_str) +{ + int ret = 0; + char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD="); + + WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n", + __FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET)); + + if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 0) { + + WL_SOFTAP((" AP_CFG \n")); + + + if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) { + WL_ERROR(("ERROR: SoftAP CFG prams !\n")); + ret = -1; + } else { + ret = set_ap_cfg(dev, &my_ap); + } + + } else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) { + + WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n")); + + WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name)); + +#ifndef AP_ONLY + if (ap_net_dev == NULL) { + printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n"); + } else { + if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0) + WL_ERROR(("%s line %d fail to set bss up\n", \ + __FUNCTION__, __LINE__)); + } +#else + if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0) + WL_ERROR(("%s line %d fail to set bss up\n", \ + __FUNCTION__, __LINE__)); +#endif + } else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) { + /* no code yet */ + } else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) { + WL_SOFTAP((" \n temp DOWN SOFTAP\n")); +#ifndef AP_ONLY + if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) { + WL_ERROR(("%s line %d fail to set bss down\n", \ + __FUNCTION__, __LINE__)); + } +#endif + } + + return ret; +} +#endif + +static int wl_iw_set_priv( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *ext +) +{ + int ret = 0; + char * extra; + + if (!(extra = kmalloc(dwrq->length, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, dwrq->pointer, dwrq->length)) { + kfree(extra); + return -EFAULT; + } + + WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d\n dwrq->length:%d", + dev->name, extra, info->cmd, info->flags, dwrq->length)); + + net_os_wake_lock(dev); + + if (dwrq->length && extra) { + if (strnicmp(extra, "START", strlen("START")) == 0) { + wl_iw_control_wl_on(dev, info); + WL_TRACE(("%s, Received regular START command\n", __FUNCTION__)); + } + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__)); + kfree(extra); + net_os_wake_unlock(dev); + return -EFAULT; + } + + if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) { +#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS + WL_TRACE(("%s: active 
scan setting suppressed\n", dev->name)); +#else + ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra); +#endif + } else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0) +#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS + WL_TRACE(("%s: passive scan setting suppressed\n", dev->name)); +#else + ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra); +#endif + else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0) + ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0) + ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0) + ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0) + ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "STOP", strlen("STOP")) == 0) + ret = wl_iw_control_wl_off(dev, info); + else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0) + ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0) + ret = wl_iw_set_band(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0) + ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0) + ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0) + ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra); +#if defined(PNO_SUPPORT) + else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0) + ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0) + ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0) + ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra); +#endif +#if defined(CSCAN) + else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0) + ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra); +#endif +#ifdef CUSTOMER_HW2 + else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) + ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) + ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra); +#else + else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) + ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra); +#endif + else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0) + ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra); +#ifdef SOFTAP + else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) { + wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra); + } else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) { + WL_SOFTAP(("penguin, set AP_MAC_LIST_SET\n")); + set_ap_mac_list(dev, (extra + PROFILE_OFFSET)); + } +#endif + else { + WL_TRACE(("Unknown PRIVATE command: %s: ignored\n", extra)); + snprintf(extra, MAX_WX_STRING, "OK"); + dwrq->length = strlen("OK") + 1; + } + } + + 
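+	/*
+	 * Whatever the selected handler left in 'extra' (for example the RSSI,
+	 * LINKSPEED or MACADDR reply, or the default "OK" string) is copied
+	 * back to user space below, using the length set in dwrq->length.
+	 */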
net_os_wake_unlock(dev); + + if (extra) { + if (copy_to_user(dwrq->pointer, extra, dwrq->length)) { + kfree(extra); + return -EFAULT; + } + + kfree(extra); + } + + return ret; +} + +static const iw_handler wl_iw_handler[] = +{ + (iw_handler) wl_iw_config_commit, + (iw_handler) wl_iw_get_name, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_freq, + (iw_handler) wl_iw_get_freq, + (iw_handler) wl_iw_set_mode, + (iw_handler) wl_iw_get_mode, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_get_range, + (iw_handler) wl_iw_set_priv, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_spy, + (iw_handler) wl_iw_get_spy, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_wap, + (iw_handler) wl_iw_get_wap, +#if WIRELESS_EXT > 17 + (iw_handler) wl_iw_mlme, +#else + (iw_handler) NULL, +#endif +#if defined(WL_IW_USE_ISCAN) + (iw_handler) wl_iw_iscan_get_aplist, +#else + (iw_handler) wl_iw_get_aplist, +#endif +#if WIRELESS_EXT > 13 +#if defined(WL_IW_USE_ISCAN) + (iw_handler) wl_iw_iscan_set_scan, + (iw_handler) wl_iw_iscan_get_scan, +#else + (iw_handler) wl_iw_set_scan, + (iw_handler) wl_iw_get_scan, +#endif +#else + (iw_handler) NULL, + (iw_handler) NULL, +#endif + (iw_handler) wl_iw_set_essid, + (iw_handler) wl_iw_get_essid, + (iw_handler) wl_iw_set_nick, + (iw_handler) wl_iw_get_nick, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_rate, + (iw_handler) wl_iw_get_rate, + (iw_handler) wl_iw_set_rts, + (iw_handler) wl_iw_get_rts, + (iw_handler) wl_iw_set_frag, + (iw_handler) wl_iw_get_frag, + (iw_handler) wl_iw_set_txpow, + (iw_handler) wl_iw_get_txpow, +#if WIRELESS_EXT > 10 + (iw_handler) wl_iw_set_retry, + (iw_handler) wl_iw_get_retry, +#endif + (iw_handler) wl_iw_set_encode, + (iw_handler) wl_iw_get_encode, + (iw_handler) wl_iw_set_power, + (iw_handler) wl_iw_get_power, +#if WIRELESS_EXT > 17 + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_wpaie, + (iw_handler) wl_iw_get_wpaie, + (iw_handler) wl_iw_set_wpaauth, + (iw_handler) wl_iw_get_wpaauth, + (iw_handler) wl_iw_set_encodeext, + (iw_handler) wl_iw_get_encodeext, +#ifdef BCMWPA2 + (iw_handler) wl_iw_set_pmksa, +#endif +#endif +}; + +#if WIRELESS_EXT > 12 +static const iw_handler wl_iw_priv_handler[] = { + NULL, + (iw_handler)wl_iw_set_active_scan, + NULL, + (iw_handler)wl_iw_get_rssi, + NULL, + (iw_handler)wl_iw_set_passive_scan, + NULL, + (iw_handler)wl_iw_get_link_speed, + NULL, + (iw_handler)wl_iw_get_macaddr, + NULL, + (iw_handler)wl_iw_control_wl_off, + NULL, + (iw_handler)wl_iw_control_wl_on, +#ifdef SOFTAP + NULL, + (iw_handler)iwpriv_set_ap_config, + + NULL, + (iw_handler)iwpriv_get_assoc_list, + + NULL, + (iw_handler)iwpriv_set_mac_filters, + + NULL, + (iw_handler)iwpriv_en_ap_bss, + + NULL, + (iw_handler)iwpriv_wpasupp_loop_tst, + + NULL, + (iw_handler)iwpriv_softap_stop, + + NULL, + (iw_handler)iwpriv_fw_reload, +#endif +#if defined(CSCAN) + + NULL, + (iw_handler)iwpriv_set_cscan +#endif +}; + +static const struct iw_priv_args wl_iw_priv_args[] = { + { + WL_IW_SET_ACTIVE_SCAN, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "SCAN-ACTIVE" + }, + { + WL_IW_GET_RSSI, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "RSSI" + }, + { + WL_IW_SET_PASSIVE_SCAN, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "SCAN-PASSIVE" + }, + { + WL_IW_GET_LINK_SPEED, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "LINKSPEED" + }, + { + 
WL_IW_GET_CURR_MACADDR, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "Macaddr" + }, + { + WL_IW_SET_STOP, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "STOP" + }, + { + WL_IW_SET_START, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "START" + }, + +#ifdef SOFTAP + { + WL_SET_AP_CFG, + IW_PRIV_TYPE_CHAR | 256, + 0, + "AP_SET_CFG" + }, + + { + WL_AP_STA_LIST, + 0, + IW_PRIV_TYPE_CHAR | 0, + "AP_GET_STA_LIST" + }, + + { + WL_AP_MAC_FLTR, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "AP_SET_MAC_FLTR" + }, + + { + WL_AP_BSS_START, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "AP_BSS_START" + }, + + { + AP_LPB_CMD, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "AP_LPB_CMD" + }, + + { + WL_AP_STOP, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "AP_BSS_STOP" + }, + + { + WL_FW_RELOAD, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "WL_FW_RELOAD" + }, +#endif +#if defined(CSCAN) + { + WL_COMBO_SCAN, + IW_PRIV_TYPE_CHAR | 1024, + 0, + "CSCAN" + }, +#endif +}; + +const struct iw_handler_def wl_iw_handler_def = +{ + .num_standard = ARRAYSIZE(wl_iw_handler), + .standard = (iw_handler *) wl_iw_handler, + .num_private = ARRAYSIZE(wl_iw_priv_handler), + .num_private_args = ARRAY_SIZE(wl_iw_priv_args), + .private = (iw_handler *)wl_iw_priv_handler, + .private_args = (void *) wl_iw_priv_args, + +#if WIRELESS_EXT >= 19 + get_wireless_stats: dhd_get_wireless_stats, +#endif +}; +#endif + + +int wl_iw_ioctl( + struct net_device *dev, + struct ifreq *rq, + int cmd +) +{ + struct iwreq *wrq = (struct iwreq *) rq; + struct iw_request_info info; + iw_handler handler; + char *extra = NULL; + int token_size = 1, max_tokens = 0, ret = 0; + + net_os_wake_lock(dev); + + WL_TRACE(("%s: cmd:%x alled via dhd->do_ioctl()entry point\n", __FUNCTION__, cmd)); + if (cmd < SIOCIWFIRST || + IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) || + !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) { + WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd)); + net_os_wake_unlock(dev); + return -EOPNOTSUPP; + } + + switch (cmd) { + + case SIOCSIWESSID: + case SIOCGIWESSID: + case SIOCSIWNICKN: + case SIOCGIWNICKN: + max_tokens = IW_ESSID_MAX_SIZE + 1; + break; + + case SIOCSIWENCODE: + case SIOCGIWENCODE: +#if WIRELESS_EXT > 17 + case SIOCSIWENCODEEXT: + case SIOCGIWENCODEEXT: +#endif + max_tokens = wrq->u.data.length; + break; + + case SIOCGIWRANGE: + max_tokens = sizeof(struct iw_range) + 500; + break; + + case SIOCGIWAPLIST: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_AP; + break; + +#if WIRELESS_EXT > 13 + case SIOCGIWSCAN: +#if defined(WL_IW_USE_ISCAN) + if (g_iscan) + max_tokens = wrq->u.data.length; + else +#endif + max_tokens = IW_SCAN_MAX_DATA; + break; +#endif + + case SIOCSIWSPY: + token_size = sizeof(struct sockaddr); + max_tokens = IW_MAX_SPY; + break; + + case SIOCGIWSPY: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_SPY; + break; + +#if WIRELESS_EXT > 17 + case SIOCSIWPMKSA: + case SIOCSIWGENIE: +#endif + case SIOCSIWPRIV: + max_tokens = wrq->u.data.length; + break; + } + + if (max_tokens && wrq->u.data.pointer) { + if (wrq->u.data.length > max_tokens) { + WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n", \ + __FUNCTION__, cmd, wrq->u.data.length, max_tokens)); + ret = -E2BIG; + goto 
wl_iw_ioctl_done; + } + if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) { + ret = -ENOMEM; + goto wl_iw_ioctl_done; + } + + if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) { + kfree(extra); + ret = -EFAULT; + goto wl_iw_ioctl_done; + } + } + + info.cmd = cmd; + info.flags = 0; + + ret = handler(dev, &info, &wrq->u, extra); + + if (extra) { + if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) { + kfree(extra); + ret = -EFAULT; + goto wl_iw_ioctl_done; + } + + kfree(extra); + } + +wl_iw_ioctl_done: + + net_os_wake_unlock(dev); + + return ret; +} + + +bool +wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, + char* stringBuf, uint buflen) +{ + typedef struct conn_fail_event_map_t { + uint32 inEvent; + uint32 inStatus; + uint32 inReason; + const char* outName; + const char* outCause; + } conn_fail_event_map_t; + + +# define WL_IW_DONT_CARE 9999 + const conn_fail_event_map_t event_map [] = { + + + {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, + "Conn", "Success"}, + {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, + "Conn", "NoNetworks"}, + {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ConfigMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, + "Conn", "EncrypMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, + "Conn", "RsnMismatch"}, + {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "AuthTimeout"}, + {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "AuthFail"}, + {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, + "Conn", "AuthNoAck"}, + {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ReassocFail"}, + {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "ReassocTimeout"}, + {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, + "Conn", "ReassocAbort"}, + {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, + "Sup", "ConnSuccess"}, + {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Sup", "WpaHandshakeFail"}, + {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Deauth"}, + {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "DisassocInd"}, + {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Disassoc"} + }; + + const char* name = ""; + const char* cause = NULL; + int i; + + + for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) { + const conn_fail_event_map_t* row = &event_map[i]; + if (row->inEvent == event_type && + (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && + (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) { + name = row->outName; + cause = row->outCause; + break; + } + } + + + if (cause) { + memset(stringBuf, 0, buflen); + snprintf(stringBuf, buflen, "%s %s %02d %02d", + name, cause, status, reason); + WL_INFORM(("Connection status: %s\n", stringBuf)); + return TRUE; + } else { + return FALSE; + } +} + +#if WIRELESS_EXT > 14 + +static bool +wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) +{ + uint32 event = ntoh32(e->event_type); + uint32 status = ntoh32(e->status); + uint32 reason = ntoh32(e->reason); + + if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { + return TRUE; + } + else + return FALSE; +} +#endif + +#ifndef IW_CUSTOM_MAX +#define IW_CUSTOM_MAX 256 +#endif + +void +wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) +{ +#if WIRELESS_EXT > 13 + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd = 0; + uint32 
event_type = ntoh32(e->event_type); + uint16 flags = ntoh16(e->flags); + uint32 datalen = ntoh32(e->datalen); + uint32 status = ntoh32(e->status); + uint32 toto; + static uint32 roam_no_success = 0; + static bool roam_no_success_send = FALSE; + + memset(&wrqu, 0, sizeof(wrqu)); + memset(extra, 0, sizeof(extra)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return; + } + + net_os_wake_lock(dev); + + WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type)); + + switch (event_type) { +#if defined(SOFTAP) + case WLC_E_PRUNE: + if (ap_cfg_running) { + char *macaddr = (char *)&e->addr; + WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n", + macaddr[0], macaddr[1], macaddr[2], macaddr[3], \ + macaddr[4], macaddr[5])); + + if (ap_macmode) { + int i; + for (i = 0; i < ap_black_list.count; i++) { + if (!bcmp(macaddr, &ap_black_list.ea[i], \ + sizeof(struct ether_addr))) { + WL_SOFTAP(("mac in black list, ignore it\n")); + break; + } + } + + if (i == ap_black_list.count) { + char mac_buf[32] = {0}; + sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + wl_iw_send_priv_event(priv_dev, mac_buf); + } + } + } + break; +#endif + case WLC_E_TXFAIL: + cmd = IWEVTXDROP; + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + break; +#if WIRELESS_EXT > 14 + case WLC_E_JOIN: + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: +#if defined(SOFTAP) + WL_SOFTAP(("STA connect received %d\n", event_type)); + if (ap_cfg_running) { + wl_iw_send_priv_event(priv_dev, "STA_JOIN"); + goto wl_iw_event_end; + } +#endif + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + cmd = IWEVREGISTERED; + break; + case WLC_E_ROAM: + if (status != WLC_E_STATUS_SUCCESS) { + roam_no_success++; + if ((roam_no_success == 3) && (roam_no_success_send == FALSE)) { + + roam_no_success_send = TRUE; + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + cmd = SIOCGIWAP; + WL_ERROR(("%s ROAMING did not succeeded , send Link Down\n", \ + __FUNCTION__)); + } else { + WL_TRACE(("##### ROAMING did not succeeded %d\n", roam_no_success)); + goto wl_iw_event_end; + } + } else { + memcpy(wrqu.addr.sa_data, &e->addr.octet, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + cmd = SIOCGIWAP; + } + break; + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: +#if defined(SOFTAP) + WL_SOFTAP(("STA disconnect received %d\n", event_type)); + if (ap_cfg_running) { + wl_iw_send_priv_event(priv_dev, "STA_LEAVE"); + goto wl_iw_event_end; + } +#endif + cmd = SIOCGIWAP; + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + bzero(&extra, ETHER_ADDR_LEN); + break; + case WLC_E_LINK: + case WLC_E_NDIS_LINK: + cmd = SIOCGIWAP; + if (!(flags & WLC_EVENT_MSG_LINK)) { +#ifdef SOFTAP +#ifdef AP_ONLY + if (ap_cfg_running) { +#else + if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) { +#endif + WL_SOFTAP(("AP DOWN %d\n", event_type)); + wl_iw_send_priv_event(priv_dev, "AP_DOWN"); + } else { + WL_TRACE(("STA_Link Down\n")); + g_ss_cache_ctrl.m_link_down = 1; + } +#else + g_ss_cache_ctrl.m_link_down = 1; +#endif + WL_TRACE(("Link Down\n")); + + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + } + else { + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + g_ss_cache_ctrl.m_link_down = 0; + + memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN); + +#ifdef SOFTAP 
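+			/*
+			 * Link-up handling: in SOFTAP builds the event is reported to
+			 * user space as "AP_UP" when it arrives on the AP interface
+			 * (the primary interface in AP_ONLY builds, "wl0.1" otherwise);
+			 * for the STA interface only the roam-failure bookkeeping is
+			 * reset.
+			 */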
+#ifdef AP_ONLY + if (ap_cfg_running) { +#else + if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) { +#endif + WL_SOFTAP(("AP UP %d\n", event_type)); + wl_iw_send_priv_event(priv_dev, "AP_UP"); + } else { + WL_TRACE(("STA_LINK_UP\n")); + roam_no_success_send = FALSE; + roam_no_success = 0; + } +#endif + WL_TRACE(("Link UP\n")); + + } + net_os_wake_lock_timeout_enable(dev); + wrqu.addr.sa_family = ARPHRD_ETHER; + break; + case WLC_E_ACTION_FRAME: + cmd = IWEVCUSTOM; + if (datalen + 1 <= sizeof(extra)) { + wrqu.data.length = datalen + 1; + extra[0] = WLC_E_ACTION_FRAME; + memcpy(&extra[1], data, datalen); + WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); + } + break; + + case WLC_E_ACTION_FRAME_COMPLETE: + cmd = IWEVCUSTOM; + memcpy(&toto, data, 4); + if (sizeof(status) + 1 <= sizeof(extra)) { + wrqu.data.length = sizeof(status) + 1; + extra[0] = WLC_E_ACTION_FRAME_COMPLETE; + memcpy(&extra[1], &status, sizeof(status)); + printf("wl_iw_event status %d PacketId %d \n", status, toto); + printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length); + } + break; +#endif +#if WIRELESS_EXT > 17 + case WLC_E_MIC_ERROR: { + struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; + cmd = IWEVMICHAELMICFAILURE; + wrqu.data.length = sizeof(struct iw_michaelmicfailure); + if (flags & WLC_EVENT_MSG_GROUP) + micerrevt->flags |= IW_MICFAILURE_GROUP; + else + micerrevt->flags |= IW_MICFAILURE_PAIRWISE; + memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); + micerrevt->src_addr.sa_family = ARPHRD_ETHER; + + break; + } +#ifdef BCMWPA2 + case WLC_E_PMKID_CACHE: { + if (data) + { + struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; + pmkid_cand_list_t *pmkcandlist; + pmkid_cand_t *pmkidcand; + int count; + + cmd = IWEVPMKIDCAND; + pmkcandlist = data; + count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); + ASSERT(count >= 0); + wrqu.data.length = sizeof(struct iw_pmkid_cand); + pmkidcand = pmkcandlist->pmkid_cand; + while (count) { + bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand)); + if (pmkidcand->preauth) + iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH; + bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data, + ETHER_ADDR_LEN); +#ifndef SANDGATE2G + wireless_send_event(dev, cmd, &wrqu, extra); +#endif + pmkidcand++; + count--; + } + } + goto wl_iw_event_end; + } +#endif +#endif + + case WLC_E_SCAN_COMPLETE: +#if defined(WL_IW_USE_ISCAN) + if ((g_iscan) && (g_iscan->sysioc_pid >= 0) && + (g_iscan->iscan_state != ISCAN_STATE_IDLE)) + { + up(&g_iscan->sysioc_sem); + } else { + cmd = SIOCGIWSCAN; + wrqu.data.length = strlen(extra); + WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n", \ + g_iscan->iscan_state)); + } +#else + cmd = SIOCGIWSCAN; + wrqu.data.length = strlen(extra); + WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n")); +#endif + break; + + case WLC_E_PFN_NET_FOUND: + { + wlc_ssid_t * ssid; + ssid = (wlc_ssid_t *)data; + WL_TRACE(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n", \ + __FUNCTION__, PNO_EVENT_UP, ssid->SSID, ssid->SSID_len)); + net_os_wake_lock_timeout_enable(dev); + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + strcpy(extra, PNO_EVENT_UP); + wrqu.data.length = strlen(extra); + } + break; + + default: + + WL_TRACE(("Unknown Event %d: ignoring\n", event_type)); + break; + } +#ifndef SANDGATE2G + if (cmd) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) + if (cmd == SIOCGIWSCAN) + wireless_send_event(dev, cmd, &wrqu, NULL); + else +#endif + wireless_send_event(dev, cmd, &wrqu, extra); 
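+		/*
+		 * Note: on kernels newer than 2.6.31 the SIOCGIWSCAN notification is
+		 * sent without a payload (see the #if above), presumably because the
+		 * newer WEXT core does not expect scan data attached to the event;
+		 * user space fetches the results via the SIOCGIWSCAN ioctl instead.
+		 */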
+ } +#endif + +#if WIRELESS_EXT > 14 + + memset(extra, 0, sizeof(extra)); + if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { + cmd = IWEVCUSTOM; + wrqu.data.length = strlen(extra); +#ifndef SANDGATE2G + wireless_send_event(dev, cmd, &wrqu, extra); +#endif + } +#endif +wl_iw_event_end: + net_os_wake_unlock(dev); +#endif +} + +int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) +{ + int res = 0; + wl_cnt_t cnt; + int phy_noise; + int rssi; + scb_val_t scb_val; + + phy_noise = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) + goto done; + + phy_noise = dtoh32(phy_noise); + WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise)); + + bzero(&scb_val, sizeof(scb_val_t)); + if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) + goto done; + + rssi = dtoh32(scb_val.val); + WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi)); + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + wstats->qual.qual = 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + wstats->qual.qual = 1; + else if (rssi <= WL_IW_RSSI_LOW) + wstats->qual.qual = 2; + else if (rssi <= WL_IW_RSSI_GOOD) + wstats->qual.qual = 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + wstats->qual.qual = 4; + else + wstats->qual.qual = 5; + + + wstats->qual.level = 0x100 + rssi; + wstats->qual.noise = 0x100 + phy_noise; +#if WIRELESS_EXT > 18 + wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM); +#else + wstats->qual.updated |= 7; +#endif + +#if WIRELESS_EXT > 11 + WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t))); + + memset(&cnt, 0, sizeof(wl_cnt_t)); + res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t)); + if (res) + { + WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res)); + goto done; + } + + cnt.version = dtoh16(cnt.version); + if (cnt.version != WL_CNT_T_VERSION) { + WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", + WL_CNT_T_VERSION, cnt.version)); + goto done; + } + + wstats->discard.nwid = 0; + wstats->discard.code = dtoh32(cnt.rxundec); + wstats->discard.fragment = dtoh32(cnt.rxfragerr); + wstats->discard.retries = dtoh32(cnt.txfail); + wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant); + wstats->miss.beacon = 0; + + WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", + dtoh32(cnt.txframe), dtoh32(cnt.txbyte))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr))); + WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant))); + +#endif + +done: + return res; +} +static void +wl_iw_bt_flag_set( + struct net_device *dev, + bool set) +{ + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + + if (set == TRUE) { + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on)); + } + else { + dev_wlc_bufvar_set(dev, "btc_flags", + (char 
*)&buf_flag7_default[0], sizeof(buf_flag7_default)); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif +} + +static void +wl_iw_bt_timerfunc(ulong data) +{ + bt_info_t *bt_local = (bt_info_t *)data; + bt_local->timer_on = 0; + WL_TRACE(("%s\n", __FUNCTION__)); + + up(&bt_local->bt_sem); +} + +static int +_bt_dhcp_sysioc_thread(void *data) +{ + DAEMONIZE("dhcp_sysioc"); + + while (down_interruptible(&g_bt->bt_sem) == 0) { + + net_os_wake_lock(g_bt->dev); + + if (g_bt->timer_on) { + g_bt->timer_on = 0; + del_timer_sync(&g_bt->timer); + } + + switch (g_bt->bt_state) { + case BT_DHCP_START: + g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW; + mod_timer(&g_bt->timer, jiffies + BT_DHCP_OPPORTUNITY_WINDOW_TIEM*HZ/1000); + g_bt->timer_on = 1; + break; + + case BT_DHCP_OPPORTUNITY_WINDOW: + WL_TRACE(("%s waiting for %d msec expired, force bt flag\n", \ + __FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIEM)); + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE); + g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&g_bt->timer, jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000); + g_bt->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + WL_TRACE(("%s waiting for %d msec expired remove bt flag\n", \ + __FUNCTION__, BT_DHCP_FLAG_FORCE_TIME)); + + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE); + g_bt->bt_state = BT_DHCP_IDLE; + g_bt->timer_on = 0; + break; + + default: + WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__, \ + g_bt->bt_state)); + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE); + g_bt->bt_state = BT_DHCP_IDLE; + g_bt->timer_on = 0; + break; + } + + net_os_wake_unlock(g_bt->dev); + } + + if (g_bt->timer_on) { + g_bt->timer_on = 0; + del_timer_sync(&g_bt->timer); + } + + complete_and_exit(&g_bt->bt_exited, 0); +} + +static void +wl_iw_bt_release(void) +{ + bt_info_t *bt_local = g_bt; + + if (!bt_local) { + return; + } + + if (bt_local->bt_pid >= 0) { + KILL_PROC(bt_local->bt_pid, SIGTERM); + wait_for_completion(&bt_local->bt_exited); + } + kfree(bt_local); + g_bt = NULL; +} + +static int +wl_iw_bt_init(struct net_device *dev) +{ + bt_info_t *bt_dhcp = NULL; + + bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL); + if (!bt_dhcp) + return -ENOMEM; + + memset(bt_dhcp, 0, sizeof(bt_info_t)); + bt_dhcp->bt_pid = -1; + g_bt = bt_dhcp; + bt_dhcp->dev = dev; + bt_dhcp->bt_state = BT_DHCP_IDLE; + + + bt_dhcp->timer_ms = 10; + init_timer(&bt_dhcp->timer); + bt_dhcp->timer.data = (ulong)bt_dhcp; + bt_dhcp->timer.function = wl_iw_bt_timerfunc; + + sema_init(&bt_dhcp->bt_sem, 0); + init_completion(&bt_dhcp->bt_exited); + bt_dhcp->bt_pid = kernel_thread(_bt_dhcp_sysioc_thread, bt_dhcp, 0); + if (bt_dhcp->bt_pid < 0) { + WL_ERROR(("Failed in %s\n", __FUNCTION__)); + return -ENOMEM; + } + + return 0; +} + +int wl_iw_attach(struct net_device *dev, void * dhdp) +{ + int params_size; + wl_iw_t *iw; +#if defined(WL_IW_USE_ISCAN) + iscan_info_t *iscan = NULL; +#endif + + mutex_init(&wl_cache_lock); + mutex_init(&wl_start_lock); + +#if defined(WL_IW_USE_ISCAN) + if (!dev) + return 0; + + memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t)); + +#ifdef CSCAN + params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) + + (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t); +#else + params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); +#endif + iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); + if (!iscan) + return -ENOMEM; + memset(iscan, 0, sizeof(iscan_info_t)); + + 
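+	/*
+	 * params_size (computed above) reserves room for the fixed iscan header
+	 * plus, when CSCAN is enabled, WL_NUMCHANNELS channel entries and
+	 * WL_SCAN_PARAMS_SSID_MAX SSIDs; wl_iw_combined_scan_set() relies on
+	 * this headroom when it appends the SSID list after the channel list.
+	 */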
iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); + if (!iscan->iscan_ex_params_p) + return -ENOMEM; + iscan->iscan_ex_param_size = params_size; + iscan->sysioc_pid = -1; + + g_iscan = iscan; + iscan->dev = dev; + iscan->iscan_state = ISCAN_STATE_IDLE; + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE; + g_first_counter_scans = 0; + g_iscan->scan_flag = 0; + + iscan->timer_ms = 8000; + init_timer(&iscan->timer); + iscan->timer.data = (ulong)iscan; + iscan->timer.function = wl_iw_timerfunc; + + sema_init(&iscan->sysioc_sem, 0); + init_completion(&iscan->sysioc_exited); + iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0); + if (iscan->sysioc_pid < 0) + return -ENOMEM; +#endif + + iw = *(wl_iw_t **)netdev_priv(dev); + iw->pub = (dhd_pub_t *)dhdp; +#ifdef SOFTAP + priv_dev = dev; +#endif + g_scan = NULL; + + g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL); + if (!g_scan) + return -ENOMEM; + + memset(g_scan, 0, G_SCAN_RESULTS); + g_scan_specified_ssid = 0; + +#if !defined(CSCAN) + wl_iw_init_ss_cache_ctrl(); +#endif + + wl_iw_bt_init(dev); + + return 0; +} + +void wl_iw_detach(void) +{ +#if defined(WL_IW_USE_ISCAN) + iscan_buf_t *buf; + iscan_info_t *iscan = g_iscan; + + if (!iscan) + return; + if (iscan->sysioc_pid >= 0) { + KILL_PROC(iscan->sysioc_pid, SIGTERM); + wait_for_completion(&iscan->sysioc_exited); + } + mutex_lock(&wl_cache_lock); + while (iscan->list_hdr) { + buf = iscan->list_hdr->next; + kfree(iscan->list_hdr); + iscan->list_hdr = buf; + } + kfree(iscan->iscan_ex_params_p); + kfree(iscan); + g_iscan = NULL; + mutex_unlock(&wl_cache_lock); +#endif + + if (g_scan) + kfree(g_scan); + + g_scan = NULL; +#if !defined(CSCAN) + wl_iw_release_ss_cache_ctrl(); +#endif + wl_iw_bt_release(); +#ifdef SOFTAP + if (ap_cfg_running) { + WL_TRACE(("\n%s AP is going down\n", __FUNCTION__)); + wl_iw_send_priv_event(priv_dev, "AP_DOWN"); + } +#endif + +} diff --git a/drivers/net/wireless/bcm4329/wl_iw.h b/drivers/net/wireless/bcm4329/wl_iw.h new file mode 100644 index 000000000000..43088cf886bd --- /dev/null +++ b/drivers/net/wireless/bcm4329/wl_iw.h @@ -0,0 +1,278 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2010, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_iw.h,v 1.5.34.1.6.36.4.1 2010/09/10 19:24:30 Exp $ + */ + + +#ifndef _wl_iw_h_ +#define _wl_iw_h_ + +#include <linux/wireless.h> + +#include <typedefs.h> +#include <proto/ethernet.h> +#include <wlioctl.h> + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#define BAND_GET_CMD "GETBAND" +#define BAND_SET_CMD "SETBAND" +#define DTIM_SKIP_GET_CMD "DTIMSKIPGET" +#define DTIM_SKIP_SET_CMD "DTIMSKIPSET" +#define SETSUSPEND_CMD "SETSUSPENDOPT" +#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR" +#define PNOSETUP_SET_CMD "PNOSETUP " +#define PNOENABLE_SET_CMD "PNOFORCE" +#define PNODEBUG_SET_CMD "PNODEBUG" + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" + + +typedef struct wl_iw_extra_params { + int target_channel; +} wl_iw_extra_params_t; + +#define WL_IW_RSSI_MINVAL -200 +#define WL_IW_RSSI_NO_SIGNAL -91 +#define WL_IW_RSSI_VERY_LOW -80 +#define WL_IW_RSSI_LOW -70 +#define WL_IW_RSSI_GOOD -68 +#define WL_IW_RSSI_VERY_GOOD -58 +#define WL_IW_RSSI_EXCELLENT -57 +#define WL_IW_RSSI_INVALID 0 +#define MAX_WX_STRING 80 +#define isprint(c) bcm_isprint(c) +#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1) +#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3) +#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5) +#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7) +#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9) +#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11) +#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13) + + +#define WL_SET_AP_CFG (SIOCIWFIRSTPRIV+15) +#define WL_AP_STA_LIST (SIOCIWFIRSTPRIV+17) +#define WL_AP_MAC_FLTR (SIOCIWFIRSTPRIV+19) +#define WL_AP_BSS_START (SIOCIWFIRSTPRIV+21) +#define AP_LPB_CMD (SIOCIWFIRSTPRIV+23) +#define WL_AP_STOP (SIOCIWFIRSTPRIV+25) +#define WL_FW_RELOAD (SIOCIWFIRSTPRIV+27) +#define WL_COMBO_SCAN (SIOCIWFIRSTPRIV+29) +#define WL_AP_SPARE3 (SIOCIWFIRSTPRIV+31) +#define G_SCAN_RESULTS (8*1024) +#define WE_ADD_EVENT_FIX 0x80 +#define G_WLAN_SET_ON 0 +#define G_WLAN_SET_OFF 1 + +#define CHECK_EXTRA_FOR_NULL(extra) \ +if (!extra) { \ + WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \ + return -EINVAL; \ +} + +typedef struct wl_iw { + char nickname[IW_ESSID_MAX_SIZE]; + + struct iw_statistics wstats; + + int spy_num; + uint32 pwsec; + uint32 gwsec; + bool privacy_invoked; + + struct ether_addr spy_addr[IW_MAX_SPY]; + struct iw_quality spy_qual[IW_MAX_SPY]; + void *wlinfo; + dhd_pub_t * pub; +} wl_iw_t; + +#define WLC_IW_SS_CACHE_MAXLEN 512 +#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN 32 +#define WLC_IW_BSS_INFO_MAXLEN \ + (WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN) + +typedef struct wl_iw_ss_cache { + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; + char dummy[WLC_IW_BSS_INFO_MAXLEN - sizeof(wl_bss_info_t)]; + int dirty; + struct wl_iw_ss_cache *next; +} wl_iw_ss_cache_t; + +typedef struct wl_iw_ss_cache_ctrl { + wl_iw_ss_cache_t *m_cache_head; + int m_link_down; + int m_timer_expired; + char m_active_bssid[ETHER_ADDR_LEN]; + uint m_prev_scan_mode; + uint m_cons_br_scan_cnt; + struct timer_list *m_timer; +} wl_iw_ss_cache_ctrl_t; +typedef enum broadcast_first_scan { + BROADCAST_SCAN_FIRST_IDLE = 0, + BROADCAST_SCAN_FIRST_STARTED, + BROADCAST_SCAN_FIRST_RESULT_READY, + BROADCAST_SCAN_FIRST_RESULT_CONSUMED +} broadcast_first_scan_t; 
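/*
 * Illustrative sketch only, not part of the Broadcom header: one way a
 * caller could coarse-grade a raw RSSI reading against the WL_IW_RSSI_*
 * thresholds defined above. The helper name is hypothetical.
 */
static inline int wl_iw_rssi_level(int rssi)
{
	if (rssi == WL_IW_RSSI_INVALID || rssi <= WL_IW_RSSI_NO_SIGNAL)
		return 0;	/* no usable signal */
	if (rssi <= WL_IW_RSSI_LOW)
		return 1;	/* very low to low */
	if (rssi <= WL_IW_RSSI_GOOD)
		return 2;	/* adequate */
	return 3;		/* very good or better */
}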
+#ifdef SOFTAP +#define SSID_LEN 33 +#define SEC_LEN 16 +#define KEY_LEN 65 +#define PROFILE_OFFSET 32 +struct ap_profile { + uint8 ssid[SSID_LEN]; + uint8 sec[SEC_LEN]; + uint8 key[KEY_LEN]; + uint32 channel; + uint32 preamble; + uint32 max_scb; +}; + + +#define MACLIST_MODE_DISABLED 0 +#define MACLIST_MODE_ENABLED 1 +#define MACLIST_MODE_ALLOW 2 +struct mflist { + uint count; + struct ether_addr ea[16]; +}; + +struct mac_list_set { + uint32 mode; + struct mflist white_list; + struct mflist black_list; +}; +#endif + +#if WIRELESS_EXT > 12 +#include <net/iw_handler.h> +extern const struct iw_handler_def wl_iw_handler_def; +#endif + +extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data); +extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats); +int wl_iw_attach(struct net_device *dev, void * dhdp); +void wl_iw_detach(void); +int wl_control_wl_start(struct net_device *dev); + +extern int net_os_wake_lock(struct net_device *dev); +extern int net_os_wake_unlock(struct net_device *dev); +extern int net_os_wake_lock_timeout(struct net_device *dev); +extern int net_os_wake_lock_timeout_enable(struct net_device *dev); +extern int net_os_set_suspend_disable(struct net_device *dev, int val); +extern int net_os_set_suspend(struct net_device *dev, int val); +extern int net_os_set_dtim_skip(struct net_device *dev, int val); +extern int net_os_set_packet_filter(struct net_device *dev, int val); +extern int net_os_send_hang_message(struct net_device *dev); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(info, stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(info, event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(info, stream, ends, iwe, extra) +#else +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(stream, ends, iwe, extra) +#endif + +extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); +extern int dhd_pno_clean(dhd_pub_t *dhd); +extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr); +extern int dhd_pno_get_status(dhd_pub_t *dhd); +extern int dhd_dev_pno_reset(struct net_device *dev); +extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, \ + int nssid, ushort scan_fr); +extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled); +extern int dhd_dev_get_pno_status(struct net_device *dev); + +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION 1 +#define PNO_TLV_SUBVERSION 1 +#define PNO_TLV_RESERVED 0 +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_EVENT_UP "PNO_EVENT" + +typedef struct cmd_tlv { + char prefix; + char version; + char subver; + char reserved; +} cmd_tlv_t; + +#if defined(CSCAN) + +typedef struct cscan_tlv { + char prefix; + char version; + char subver; + char reserved; +} cscan_tlv_t; + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define 
CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, \ + int channel_num, int *bytes_left); + +extern int wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, \ + const char token, int input_size, int *bytes_left); + +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, \ + int max, int *bytes_left); + +extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max); + +extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num); + +#endif + +#endif diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 91606bb55318..127617195629 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -41,23 +41,40 @@ static int __power_supply_changed_work(struct device *dev, void *data) static void power_supply_changed_work(struct work_struct *work) { + unsigned long flags; struct power_supply *psy = container_of(work, struct power_supply, changed_work); dev_dbg(psy->dev, "%s\n", __func__); - class_for_each_device(power_supply_class, NULL, psy, - __power_supply_changed_work); + spin_lock_irqsave(&psy->changed_lock, flags); + if (psy->changed) { + psy->changed = false; + spin_unlock_irqrestore(&psy->changed_lock, flags); - power_supply_update_leds(psy); + class_for_each_device(power_supply_class, NULL, psy, + __power_supply_changed_work); - kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE); + power_supply_update_leds(psy); + + kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE); + spin_lock_irqsave(&psy->changed_lock, flags); + } + if (!psy->changed) + wake_unlock(&psy->work_wake_lock); + spin_unlock_irqrestore(&psy->changed_lock, flags); } void power_supply_changed(struct power_supply *psy) { + unsigned long flags; + dev_dbg(psy->dev, "%s\n", __func__); + spin_lock_irqsave(&psy->changed_lock, flags); + psy->changed = true; + wake_lock(&psy->work_wake_lock); + spin_unlock_irqrestore(&psy->changed_lock, flags); schedule_work(&psy->changed_work); } EXPORT_SYMBOL_GPL(power_supply_changed); @@ -180,6 +197,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy) goto device_add_failed; INIT_WORK(&psy->changed_work, power_supply_changed_work); + spin_lock_init(&psy->changed_lock); + wake_lock_init(&psy->work_wake_lock, WAKE_LOCK_SUSPEND, "power-supply"); rc = power_supply_create_triggers(psy); if (rc) @@ -190,6 +209,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy) goto success; create_triggers_failed: + wake_lock_destroy(&psy->work_wake_lock); device_unregister(psy->dev); kobject_set_name_failed: device_add_failed: @@ -203,6 +223,7 @@ void power_supply_unregister(struct power_supply *psy) { flush_scheduled_work(); power_supply_remove_triggers(psy); + wake_lock_destroy(&psy->work_wake_lock); device_unregister(psy->dev); } EXPORT_SYMBOL_GPL(power_supply_unregister); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 241c7f73a410..a86401ea8e71 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -109,6 +109,24 @@ config RTC_INTF_DEV_UIE_EMUL clock several times per second, please enable this option only if you know that you really need it. 
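The Kconfig entries added below (RTC_INTF_ALARM and RTC_INTF_ALARM_DEV) bring in the Android alarm driver and its /dev/alarm character device. For orientation, a minimal userspace consumer might look like the following sketch; it assumes the <linux/android_alarm.h> header that these drivers rely on is exported to userspace, and error handling is abbreviated.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/android_alarm.h>

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDWR);

	if (fd < 0)
		return 1;
	/* Read the elapsed-realtime clock maintained by the driver. */
	if (ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts) == 0)
		printf("elapsed realtime: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	/* Arm a wakeup alarm 10 s from now (absolute time in the same base),
	 * then block until it fires. */
	ts.tv_sec += 10;
	ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP), &ts);
	ioctl(fd, ANDROID_ALARM_WAIT);
	close(fd);
	return 0;
}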
+config RTC_INTF_ALARM + bool "Android alarm driver" + depends on RTC_CLASS + default y + help + Provides non-wakeup and rtc backed wakeup alarms based on rtc or + elapsed realtime, and a non-wakeup alarm on the monotonic clock. + Also provides an interface to set the wall time which must be used + for elapsed realtime to work. + +config RTC_INTF_ALARM_DEV + bool "Android alarm device" + depends on RTC_INTF_ALARM + default y + help + Exports the alarm interface to user-space. + + config RTC_DRV_TEST tristate "Test driver/device" help diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index acd8426c3d8e..5520733748d2 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -11,6 +11,8 @@ obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o obj-$(CONFIG_RTC_CLASS) += rtc-core.o rtc-core-y := class.o interface.o +obj-$(CONFIG_RTC_INTF_ALARM) += alarm.o +obj-$(CONFIG_RTC_INTF_ALARM_DEV) += alarm-dev.o rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o diff --git a/drivers/rtc/alarm-dev.c b/drivers/rtc/alarm-dev.c new file mode 100644 index 000000000000..686e6f7ed480 --- /dev/null +++ b/drivers/rtc/alarm-dev.c @@ -0,0 +1,286 @@ +/* drivers/rtc/alarm-dev.c + * + * Copyright (C) 2007-2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/mach/time.h> +#include <linux/android_alarm.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/sysdev.h> +#include <linux/uaccess.h> +#include <linux/wakelock.h> + +#define ANDROID_ALARM_PRINT_INFO (1U << 0) +#define ANDROID_ALARM_PRINT_IO (1U << 1) +#define ANDROID_ALARM_PRINT_INT (1U << 2) + +static int debug_mask = ANDROID_ALARM_PRINT_INFO; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define pr_alarm(debug_level_mask, args...) 
\ + do { \ + if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \ + pr_info(args); \ + } \ + } while (0) + +#define ANDROID_ALARM_WAKEUP_MASK ( \ + ANDROID_ALARM_RTC_WAKEUP_MASK | \ + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) + +/* support old usespace code */ +#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */ +#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t) + +static int alarm_opened; +static DEFINE_SPINLOCK(alarm_slock); +static struct wake_lock alarm_wake_lock; +static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue); +static uint32_t alarm_pending; +static uint32_t alarm_enabled; +static uint32_t wait_pending; + +static struct alarm alarms[ANDROID_ALARM_TYPE_COUNT]; + +static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int rv = 0; + unsigned long flags; + struct timespec new_alarm_time; + struct timespec new_rtc_time; + struct timespec tmp_time; + enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd); + uint32_t alarm_type_mask = 1U << alarm_type; + + if (alarm_type >= ANDROID_ALARM_TYPE_COUNT) + return -EINVAL; + + if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) { + if ((file->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + if (file->private_data == NULL && + cmd != ANDROID_ALARM_SET_RTC) { + spin_lock_irqsave(&alarm_slock, flags); + if (alarm_opened) { + spin_unlock_irqrestore(&alarm_slock, flags); + return -EBUSY; + } + alarm_opened = 1; + file->private_data = (void *)1; + spin_unlock_irqrestore(&alarm_slock, flags); + } + } + + switch (ANDROID_ALARM_BASE_CMD(cmd)) { + case ANDROID_ALARM_CLEAR(0): + spin_lock_irqsave(&alarm_slock, flags); + pr_alarm(IO, "alarm %d clear\n", alarm_type); + alarm_try_to_cancel(&alarms[alarm_type]); + if (alarm_pending) { + alarm_pending &= ~alarm_type_mask; + if (!alarm_pending && !wait_pending) + wake_unlock(&alarm_wake_lock); + } + alarm_enabled &= ~alarm_type_mask; + spin_unlock_irqrestore(&alarm_slock, flags); + break; + + case ANDROID_ALARM_SET_OLD: + case ANDROID_ALARM_SET_AND_WAIT_OLD: + if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) { + rv = -EFAULT; + goto err1; + } + new_alarm_time.tv_nsec = 0; + goto from_old_alarm_set; + + case ANDROID_ALARM_SET_AND_WAIT(0): + case ANDROID_ALARM_SET(0): + if (copy_from_user(&new_alarm_time, (void __user *)arg, + sizeof(new_alarm_time))) { + rv = -EFAULT; + goto err1; + } +from_old_alarm_set: + spin_lock_irqsave(&alarm_slock, flags); + pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type, + new_alarm_time.tv_sec, new_alarm_time.tv_nsec); + alarm_enabled |= alarm_type_mask; + alarm_start_range(&alarms[alarm_type], + timespec_to_ktime(new_alarm_time), + timespec_to_ktime(new_alarm_time)); + spin_unlock_irqrestore(&alarm_slock, flags); + if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0) + && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD) + break; + /* fall though */ + case ANDROID_ALARM_WAIT: + spin_lock_irqsave(&alarm_slock, flags); + pr_alarm(IO, "alarm wait\n"); + if (!alarm_pending && wait_pending) { + wake_unlock(&alarm_wake_lock); + wait_pending = 0; + } + spin_unlock_irqrestore(&alarm_slock, flags); + rv = wait_event_interruptible(alarm_wait_queue, alarm_pending); + if (rv) + goto err1; + spin_lock_irqsave(&alarm_slock, flags); + rv = alarm_pending; + wait_pending = 1; + alarm_pending = 0; + spin_unlock_irqrestore(&alarm_slock, flags); + break; + case ANDROID_ALARM_SET_RTC: + if (copy_from_user(&new_rtc_time, (void __user *)arg, + sizeof(new_rtc_time))) { + rv = -EFAULT; + goto err1; + } + 
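/*
 * alarm_set_rtc() (implemented in alarm.c below) pauses the per-type
 * alarm queues, applies the new wall time with do_settimeofday(), and
 * writes the hardware RTC. The TIME_CHANGE notification raised next
 * wakes any thread blocked in ANDROID_ALARM_WAIT so userspace can
 * re-evaluate its alarms against the new time, even if setting the
 * RTC itself failed.
 */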
rv = alarm_set_rtc(new_rtc_time); + spin_lock_irqsave(&alarm_slock, flags); + alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK; + wake_up(&alarm_wait_queue); + spin_unlock_irqrestore(&alarm_slock, flags); + if (rv < 0) + goto err1; + break; + case ANDROID_ALARM_GET_TIME(0): + switch (alarm_type) { + case ANDROID_ALARM_RTC_WAKEUP: + case ANDROID_ALARM_RTC: + getnstimeofday(&tmp_time); + break; + case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP: + case ANDROID_ALARM_ELAPSED_REALTIME: + tmp_time = + ktime_to_timespec(alarm_get_elapsed_realtime()); + break; + case ANDROID_ALARM_TYPE_COUNT: + case ANDROID_ALARM_SYSTEMTIME: + ktime_get_ts(&tmp_time); + break; + } + if (copy_to_user((void __user *)arg, &tmp_time, + sizeof(tmp_time))) { + rv = -EFAULT; + goto err1; + } + break; + + default: + rv = -EINVAL; + goto err1; + } +err1: + return rv; +} + +static int alarm_open(struct inode *inode, struct file *file) +{ + file->private_data = NULL; + return 0; +} + +static int alarm_release(struct inode *inode, struct file *file) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&alarm_slock, flags); + if (file->private_data != 0) { + for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { + uint32_t alarm_type_mask = 1U << i; + if (alarm_enabled & alarm_type_mask) { + pr_alarm(INFO, "alarm_release: clear alarm, " + "pending %d\n", + !!(alarm_pending & alarm_type_mask)); + alarm_enabled &= ~alarm_type_mask; + } + spin_unlock_irqrestore(&alarm_slock, flags); + alarm_cancel(&alarms[i]); + spin_lock_irqsave(&alarm_slock, flags); + } + if (alarm_pending | wait_pending) { + if (alarm_pending) + pr_alarm(INFO, "alarm_release: clear " + "pending alarms %x\n", alarm_pending); + wake_unlock(&alarm_wake_lock); + wait_pending = 0; + alarm_pending = 0; + } + alarm_opened = 0; + } + spin_unlock_irqrestore(&alarm_slock, flags); + return 0; +} + +static void alarm_triggered(struct alarm *alarm) +{ + unsigned long flags; + uint32_t alarm_type_mask = 1U << alarm->type; + + pr_alarm(INT, "alarm_triggered type %d\n", alarm->type); + spin_lock_irqsave(&alarm_slock, flags); + if (alarm_enabled & alarm_type_mask) { + wake_lock_timeout(&alarm_wake_lock, 5 * HZ); + alarm_enabled &= ~alarm_type_mask; + alarm_pending |= alarm_type_mask; + wake_up(&alarm_wait_queue); + } + spin_unlock_irqrestore(&alarm_slock, flags); +} + +static const struct file_operations alarm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = alarm_ioctl, + .open = alarm_open, + .release = alarm_release, +}; + +static struct miscdevice alarm_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "alarm", + .fops = &alarm_fops, +}; + +static int __init alarm_dev_init(void) +{ + int err; + int i; + + err = misc_register(&alarm_device); + if (err) + return err; + + for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) + alarm_init(&alarms[i], i, alarm_triggered); + wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm"); + + return 0; +} + +static void __exit alarm_dev_exit(void) +{ + misc_deregister(&alarm_device); + wake_lock_destroy(&alarm_wake_lock); +} + +module_init(alarm_dev_init); +module_exit(alarm_dev_exit); + diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c new file mode 100644 index 000000000000..1250edfbdf33 --- /dev/null +++ b/drivers/rtc/alarm.c @@ -0,0 +1,587 @@ +/* drivers/rtc/alarm.c + * + * Copyright (C) 2007-2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/mach/time.h> +#include <linux/android_alarm.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/sysdev.h> +#include <linux/wakelock.h> + +#define ANDROID_ALARM_PRINT_ERROR (1U << 0) +#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1) +#define ANDROID_ALARM_PRINT_TSET (1U << 2) +#define ANDROID_ALARM_PRINT_CALL (1U << 3) +#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4) +#define ANDROID_ALARM_PRINT_INT (1U << 5) +#define ANDROID_ALARM_PRINT_FLOW (1U << 6) + +static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \ + ANDROID_ALARM_PRINT_INIT_STATUS; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define pr_alarm(debug_level_mask, args...) \ + do { \ + if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \ + pr_info(args); \ + } \ + } while (0) + +#define ANDROID_ALARM_WAKEUP_MASK ( \ + ANDROID_ALARM_RTC_WAKEUP_MASK | \ + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) + +/* support old usespace code */ +#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */ +#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t) + +struct alarm_queue { + struct rb_root alarms; + struct rb_node *first; + struct hrtimer timer; + ktime_t delta; + bool stopped; + ktime_t stopped_time; +}; + +static struct rtc_device *alarm_rtc_dev; +static DEFINE_SPINLOCK(alarm_slock); +static DEFINE_MUTEX(alarm_setrtc_mutex); +static struct wake_lock alarm_rtc_wake_lock; +static struct platform_device *alarm_platform_dev; +struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT]; +static bool suspended; + +static void update_timer_locked(struct alarm_queue *base, bool head_removed) +{ + struct alarm *alarm; + bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] || + base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP]; + + if (base->stopped) { + pr_alarm(FLOW, "changed alarm while setting the wall time\n"); + return; + } + + if (is_wakeup && !suspended && head_removed) + wake_unlock(&alarm_rtc_wake_lock); + + if (!base->first) + return; + + alarm = container_of(base->first, struct alarm, node); + + pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n", + alarm->type, alarm->function, ktime_to_ns(alarm->expires)); + + if (is_wakeup && suspended) { + pr_alarm(FLOW, "changed alarm while suspened\n"); + wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ); + return; + } + + hrtimer_try_to_cancel(&base->timer); + base->timer._expires = ktime_add(base->delta, alarm->expires); + base->timer._softexpires = ktime_add(base->delta, alarm->softexpires); + hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS); +} + +static void alarm_enqueue_locked(struct alarm *alarm) +{ + struct alarm_queue *base = &alarms[alarm->type]; + struct rb_node **link = &base->alarms.rb_node; + struct rb_node *parent = NULL; + struct alarm *entry; + int leftmost = 1; + + pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n", + alarm->type, alarm->function, ktime_to_ns(alarm->expires)); + + if (base->first == &alarm->node) + base->first = rb_next(&alarm->node); + if (!RB_EMPTY_NODE(&alarm->node)) { + rb_erase(&alarm->node, &base->alarms); + RB_CLEAR_NODE(&alarm->node); + } + + while (*link) { + 
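/*
 * Standard rbtree insertion walk: keep the queue ordered by expiry
 * time and remember whether the new alarm lands left of every existing
 * node. Only in that case does it become base->first (the earliest
 * alarm), and only then is the hrtimer reprogrammed through
 * update_timer_locked().
 */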
parent = *link; + entry = rb_entry(parent, struct alarm, node); + /* + * We don't care about collisions. Nodes with + * the same expiry time stay together. + */ + if (alarm->expires.tv64 < entry->expires.tv64) { + link = &(*link)->rb_left; + } else { + link = &(*link)->rb_right; + leftmost = 0; + } + } + if (leftmost) { + base->first = &alarm->node; + update_timer_locked(base, false); + } + + rb_link_node(&alarm->node, parent, link); + rb_insert_color(&alarm->node, &base->alarms); +} + +/** + * alarm_init - initialize an alarm + * @alarm: the alarm to be initialized + * @type: the alarm type to be used + * @function: alarm callback function + */ +void alarm_init(struct alarm *alarm, + enum android_alarm_type type, void (*function)(struct alarm *)) +{ + RB_CLEAR_NODE(&alarm->node); + alarm->type = type; + alarm->function = function; + + pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function); +} + + +/** + * alarm_start_range - (re)start an alarm + * @alarm: the alarm to be added + * @start: earliest expiry time + * @end: expiry time + */ +void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end) +{ + unsigned long flags; + + spin_lock_irqsave(&alarm_slock, flags); + alarm->softexpires = start; + alarm->expires = end; + alarm_enqueue_locked(alarm); + spin_unlock_irqrestore(&alarm_slock, flags); +} + +/** + * alarm_try_to_cancel - try to deactivate an alarm + * @alarm: alarm to stop + * + * Returns: + * 0 when the alarm was not active + * 1 when the alarm was active + * -1 when the alarm may currently be executing the callback function and + * cannot be stopped (it may also be inactive) + */ +int alarm_try_to_cancel(struct alarm *alarm) +{ + struct alarm_queue *base = &alarms[alarm->type]; + unsigned long flags; + bool first = false; + int ret = 0; + + spin_lock_irqsave(&alarm_slock, flags); + if (!RB_EMPTY_NODE(&alarm->node)) { + pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n", + alarm->type, alarm->function, + ktime_to_ns(alarm->expires)); + ret = 1; + if (base->first == &alarm->node) { + base->first = rb_next(&alarm->node); + first = true; + } + rb_erase(&alarm->node, &base->alarms); + RB_CLEAR_NODE(&alarm->node); + if (first) + update_timer_locked(base, true); + } else + pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n", + alarm->type, alarm->function); + spin_unlock_irqrestore(&alarm_slock, flags); + if (!ret && hrtimer_callback_running(&base->timer)) + ret = -1; + return ret; +} + +/** + * alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled + * + * Returns: + * 0 when the alarm was not active + * 1 when the alarm was active + */ +int alarm_cancel(struct alarm *alarm) +{ + for (;;) { + int ret = alarm_try_to_cancel(alarm); + if (ret >= 0) + return ret; + cpu_relax(); + } +} + +/** + * alarm_set_rtc - set the kernel and rtc walltime + * @new_time: timespec value containing the new time + */ +int alarm_set_rtc(struct timespec new_time) +{ + int i; + int ret; + unsigned long flags; + struct rtc_time rtc_new_rtc_time; + struct timespec tmp_time; + + rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time); + + pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n", + new_time.tv_sec, new_time.tv_nsec, + rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min, + rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1, + rtc_new_rtc_time.tm_mday, + rtc_new_rtc_time.tm_year + 1900); + + mutex_lock(&alarm_setrtc_mutex); + spin_lock_irqsave(&alarm_slock, flags); + wake_lock(&alarm_rtc_wake_lock); + getnstimeofday(&tmp_time); + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) { + hrtimer_try_to_cancel(&alarms[i].timer); + alarms[i].stopped = true; + alarms[i].stopped_time = timespec_to_ktime(tmp_time); + } + alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta = + alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta = + ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta, + timespec_to_ktime(timespec_sub(tmp_time, new_time))); + spin_unlock_irqrestore(&alarm_slock, flags); + ret = do_settimeofday(&new_time); + spin_lock_irqsave(&alarm_slock, flags); + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) { + alarms[i].stopped = false; + update_timer_locked(&alarms[i], false); + } + spin_unlock_irqrestore(&alarm_slock, flags); + if (ret < 0) { + pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n"); + goto err; + } + if (!alarm_rtc_dev) { + pr_alarm(ERROR, + "alarm_set_rtc: no RTC, time will be lost on reboot\n"); + goto err; + } + ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time); + if (ret < 0) + pr_alarm(ERROR, "alarm_set_rtc: " + "Failed to set RTC, time will be lost on reboot\n"); +err: + wake_unlock(&alarm_rtc_wake_lock); + mutex_unlock(&alarm_setrtc_mutex); + return ret; +} + +/** + * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format + * + * returns the time in ktime_t format + */ +ktime_t alarm_get_elapsed_realtime(void) +{ + ktime_t now; + unsigned long flags; + struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME]; + + spin_lock_irqsave(&alarm_slock, flags); + now = base->stopped ? base->stopped_time : ktime_get_real(); + now = ktime_sub(now, base->delta); + spin_unlock_irqrestore(&alarm_slock, flags); + return now; +} + +static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer) +{ + struct alarm_queue *base; + struct alarm *alarm; + unsigned long flags; + ktime_t now; + + spin_lock_irqsave(&alarm_slock, flags); + + base = container_of(timer, struct alarm_queue, timer); + now = base->stopped ? 
base->stopped_time : hrtimer_cb_get_time(timer); + now = ktime_sub(now, base->delta); + + pr_alarm(INT, "alarm_timer_triggered type %d at %lld\n", + base - alarms, ktime_to_ns(now)); + + while (base->first) { + alarm = container_of(base->first, struct alarm, node); + if (alarm->softexpires.tv64 > now.tv64) { + pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n", + alarm->function, ktime_to_ns(alarm->expires), + ktime_to_ns(alarm->softexpires)); + break; + } + base->first = rb_next(&alarm->node); + rb_erase(&alarm->node, &base->alarms); + RB_CLEAR_NODE(&alarm->node); + pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n", + alarm->type, alarm->function, + ktime_to_ns(alarm->expires), + ktime_to_ns(alarm->softexpires)); + spin_unlock_irqrestore(&alarm_slock, flags); + alarm->function(alarm); + spin_lock_irqsave(&alarm_slock, flags); + } + if (!base->first) + pr_alarm(FLOW, "no more alarms of type %d\n", base - alarms); + update_timer_locked(base, true); + spin_unlock_irqrestore(&alarm_slock, flags); + return HRTIMER_NORESTART; +} + +static void alarm_triggered_func(void *p) +{ + struct rtc_device *rtc = alarm_rtc_dev; + if (!(rtc->irq_data & RTC_AF)) + return; + pr_alarm(INT, "rtc alarm triggered\n"); + wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ); +} + +static int alarm_suspend(struct platform_device *pdev, pm_message_t state) +{ + int err = 0; + unsigned long flags; + struct rtc_wkalrm rtc_alarm; + struct rtc_time rtc_current_rtc_time; + unsigned long rtc_current_time; + unsigned long rtc_alarm_time; + struct timespec rtc_delta; + struct timespec wall_time; + struct alarm_queue *wakeup_queue = NULL; + struct alarm_queue *tmp_queue = NULL; + + pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event); + + spin_lock_irqsave(&alarm_slock, flags); + suspended = true; + spin_unlock_irqrestore(&alarm_slock, flags); + + hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer); + hrtimer_cancel(&alarms[ + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK].timer); + + tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP]; + if (tmp_queue->first) + wakeup_queue = tmp_queue; + tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP]; + if (tmp_queue->first && (!wakeup_queue || + hrtimer_get_expires(&tmp_queue->timer).tv64 < + hrtimer_get_expires(&wakeup_queue->timer).tv64)) + wakeup_queue = tmp_queue; + if (wakeup_queue) { + rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time); + getnstimeofday(&wall_time); + rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time); + set_normalized_timespec(&rtc_delta, + wall_time.tv_sec - rtc_current_time, + wall_time.tv_nsec); + + rtc_alarm_time = timespec_sub(ktime_to_timespec( + hrtimer_get_expires(&wakeup_queue->timer)), + rtc_delta).tv_sec; + + rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time); + rtc_alarm.enabled = 1; + rtc_set_alarm(alarm_rtc_dev, &rtc_alarm); + rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time); + rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time); + pr_alarm(SUSPEND, + "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n", + rtc_alarm_time, rtc_current_time, + rtc_delta.tv_sec, rtc_delta.tv_nsec); + if (rtc_current_time + 1 >= rtc_alarm_time) { + pr_alarm(SUSPEND, "alarm about to go off\n"); + memset(&rtc_alarm, 0, sizeof(rtc_alarm)); + rtc_alarm.enabled = 0; + rtc_set_alarm(alarm_rtc_dev, &rtc_alarm); + + spin_lock_irqsave(&alarm_slock, flags); + suspended = false; + wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ); + update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], + false); + update_timer_locked(&alarms[ + 
ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false); + err = -EBUSY; + spin_unlock_irqrestore(&alarm_slock, flags); + } + } + return err; +} + +static int alarm_resume(struct platform_device *pdev) +{ + struct rtc_wkalrm alarm; + unsigned long flags; + + pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev); + + memset(&alarm, 0, sizeof(alarm)); + alarm.enabled = 0; + rtc_set_alarm(alarm_rtc_dev, &alarm); + + spin_lock_irqsave(&alarm_slock, flags); + suspended = false; + update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false); + update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], + false); + spin_unlock_irqrestore(&alarm_slock, flags); + + return 0; +} + +static struct rtc_task alarm_rtc_task = { + .func = alarm_triggered_func +}; + +static int rtc_alarm_add_device(struct device *dev, + struct class_interface *class_intf) +{ + int err; + struct rtc_device *rtc = to_rtc_device(dev); + + mutex_lock(&alarm_setrtc_mutex); + + if (alarm_rtc_dev) { + err = -EBUSY; + goto err1; + } + + alarm_platform_dev = + platform_device_register_simple("alarm", -1, NULL, 0); + if (IS_ERR(alarm_platform_dev)) { + err = PTR_ERR(alarm_platform_dev); + goto err2; + } + err = rtc_irq_register(rtc, &alarm_rtc_task); + if (err) + goto err3; + alarm_rtc_dev = rtc; + pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name); + mutex_unlock(&alarm_setrtc_mutex); + + return 0; + +err3: + platform_device_unregister(alarm_platform_dev); +err2: +err1: + mutex_unlock(&alarm_setrtc_mutex); + return err; +} + +static void rtc_alarm_remove_device(struct device *dev, + struct class_interface *class_intf) +{ + if (dev == &alarm_rtc_dev->dev) { + pr_alarm(INIT_STATUS, "lost rtc device for alarms"); + rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task); + platform_device_unregister(alarm_platform_dev); + alarm_rtc_dev = NULL; + } +} + +static struct class_interface rtc_alarm_interface = { + .add_dev = &rtc_alarm_add_device, + .remove_dev = &rtc_alarm_remove_device, +}; + +static struct platform_driver alarm_driver = { + .suspend = alarm_suspend, + .resume = alarm_resume, + .driver = { + .name = "alarm" + } +}; + +static int __init alarm_late_init(void) +{ + unsigned long flags; + struct timespec tmp_time, system_time; + + /* this needs to run after the rtc is read at boot */ + spin_lock_irqsave(&alarm_slock, flags); + /* We read the current rtc and system time so we can later calulate + * elasped realtime to be (boot_systemtime + rtc - boot_rtc) == + * (rtc - (boot_rtc - boot_systemtime)) + */ + getnstimeofday(&tmp_time); + ktime_get_ts(&system_time); + alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta = + alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta = + timespec_to_ktime(timespec_sub(tmp_time, system_time)); + + spin_unlock_irqrestore(&alarm_slock, flags); + return 0; +} + +static int __init alarm_driver_init(void) +{ + int err; + int i; + + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) { + hrtimer_init(&alarms[i].timer, + CLOCK_REALTIME, HRTIMER_MODE_ABS); + alarms[i].timer.function = alarm_timer_triggered; + } + hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered; + err = platform_driver_register(&alarm_driver); + if (err < 0) + goto err1; + wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc"); + rtc_alarm_interface.class = rtc_class; + err = class_interface_register(&rtc_alarm_interface); + if (err < 0) + goto err2; + + return 0; + +err2: + 
wake_lock_destroy(&alarm_rtc_wake_lock); + platform_driver_unregister(&alarm_driver); +err1: + return err; +} + +static void __exit alarm_exit(void) +{ + class_interface_unregister(&rtc_alarm_interface); + wake_lock_destroy(&alarm_rtc_wake_lock); + platform_driver_unregister(&alarm_driver); +} + +late_initcall(alarm_late_init); +module_init(alarm_driver_init); +module_exit(alarm_exit); + diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 565562ba6ac9..11d7ab90a67c 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -41,25 +41,32 @@ static void rtc_device_release(struct device *dev) */ static struct timespec delta; +static struct timespec delta_delta; static time_t oldtime; static int rtc_suspend(struct device *dev, pm_message_t mesg) { struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; - struct timespec ts = current_kernel_time(); + struct timespec ts; + struct timespec new_delta; if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; + getnstimeofday(&ts); rtc_read_time(rtc, &tm); rtc_tm_to_time(&tm, &oldtime); /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */ - set_normalized_timespec(&delta, + set_normalized_timespec(&new_delta, ts.tv_sec - oldtime, ts.tv_nsec - (NSEC_PER_SEC >> 1)); + /* prevent 1/2 sec errors from accumulating */ + delta_delta = timespec_sub(new_delta, delta); + if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) + delta = new_delta; return 0; } @@ -79,6 +86,8 @@ static int rtc_resume(struct device *dev) return 0; } rtc_tm_to_time(&tm, &newtime); + if (delta_delta.tv_sec < -1) + newtime++; if (newtime <= oldtime) { if (newtime < oldtime) pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index cd8511298bcb..076cfd1546d4 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -94,6 +94,9 @@ static void __uart_start(struct tty_struct *tty) struct uart_state *state = tty->driver_data; struct uart_port *port = state->uart_port; + if (port->ops->wake_peer) + port->ops->wake_peer(port); + if (!uart_circ_empty(&state->xmit) && state->xmit.buf && !tty->stopped && !tty->hw_stopped) port->ops->start_tx(port); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 335311a98fdc..96f6dda3ee69 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -81,6 +81,8 @@ source "drivers/staging/rtl8192e/Kconfig" source "drivers/staging/frontier/Kconfig" +source "drivers/staging/android/Kconfig" + source "drivers/staging/dream/Kconfig" source "drivers/staging/pohmelfs/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index e3f1e1b6095e..47c0b9489caf 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/ obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_SPECTRA) += spectra/ obj-$(CONFIG_TRANZPORT) += frontier/ +obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_DREAM) += dream/ obj-$(CONFIG_POHMELFS) += pohmelfs/ obj-$(CONFIG_IDE_PHISON) += phison/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig new file mode 100644 index 000000000000..247194992374 --- /dev/null +++ b/drivers/staging/android/Kconfig @@ -0,0 +1,95 @@ +menu "Android" + +config ANDROID + bool "Android Drivers" + default N + ---help--- + Enable support for various drivers needed on the Android platform + +if ANDROID + +config ANDROID_BINDER_IPC + bool "Android Binder IPC Driver" + default n + +config ANDROID_LOGGER 
+ tristate "Android log driver" + default n + +config ANDROID_RAM_CONSOLE + bool "Android RAM buffer console" + default n + +config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE + bool "Enable verbose console messages on Android RAM console" + default y + depends on ANDROID_RAM_CONSOLE + +menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION + bool "Android RAM Console Enable error correction" + default n + depends on ANDROID_RAM_CONSOLE + depends on !ANDROID_RAM_CONSOLE_EARLY_INIT + select REED_SOLOMON + select REED_SOLOMON_ENC8 + select REED_SOLOMON_DEC8 + +if ANDROID_RAM_CONSOLE_ERROR_CORRECTION + +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE + int "Android RAM Console Data data size" + default 128 + help + Must be a power of 2. + +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE + int "Android RAM Console ECC size" + default 16 + +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE + int "Android RAM Console Symbol size" + default 8 + +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL + hex "Android RAM Console Polynomial" + default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4) + default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5) + default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6) + default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7) + default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8) + +endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION + +config ANDROID_RAM_CONSOLE_EARLY_INIT + bool "Start Android RAM console early" + default n + depends on ANDROID_RAM_CONSOLE + +config ANDROID_RAM_CONSOLE_EARLY_ADDR + hex "Android RAM console virtual address" + default 0 + depends on ANDROID_RAM_CONSOLE_EARLY_INIT + +config ANDROID_RAM_CONSOLE_EARLY_SIZE + hex "Android RAM console buffer size" + default 0 + depends on ANDROID_RAM_CONSOLE_EARLY_INIT + +config ANDROID_TIMED_OUTPUT + bool "Timed output class driver" + default y + +config ANDROID_TIMED_GPIO + tristate "Android timed gpio driver" + depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT + default n + +config ANDROID_LOW_MEMORY_KILLER + bool "Android Low Memory Killer" + default N + ---help--- + Register processes to be killed when memory is low + +endif # if ANDROID + +endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile new file mode 100644 index 000000000000..8e057e626d11 --- /dev/null +++ b/drivers/staging/android/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o +obj-$(CONFIG_ANDROID_LOGGER) += logger.o +obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o +obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o +obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO new file mode 100644 index 000000000000..e59c5be4be2b --- /dev/null +++ b/drivers/staging/android/TODO @@ -0,0 +1,10 @@ +TODO: + - checkpatch.pl cleanups + - sparse fixes + - rename files to be not so "generic" + - make sure things build as modules properly + - add proper arch dependancies as needed + - audit userspace interfaces to make sure they are sane + +Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: +Brian Swetland <swetland@google.com> diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c new file mode 100644 index 000000000000..e13b4c483407 --- /dev/null +++ b/drivers/staging/android/binder.c @@ -0,0 +1,3600 @@ +/* binder.c + * + * Android IPC 
Subsystem + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/cacheflush.h> +#include <linux/fdtable.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nsproxy.h> +#include <linux/poll.h> +#include <linux/debugfs.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "binder.h" + +static DEFINE_MUTEX(binder_lock); +static DEFINE_MUTEX(binder_deferred_lock); + +static HLIST_HEAD(binder_procs); +static HLIST_HEAD(binder_deferred_list); +static HLIST_HEAD(binder_dead_nodes); + +static struct dentry *binder_debugfs_dir_entry_root; +static struct dentry *binder_debugfs_dir_entry_proc; +static struct binder_node *binder_context_mgr_node; +static uid_t binder_context_mgr_uid = -1; +static int binder_last_id; +static struct workqueue_struct *binder_deferred_workqueue; + +#define BINDER_DEBUG_ENTRY(name) \ +static int binder_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, binder_##name##_show, inode->i_private); \ +} \ +\ +static const struct file_operations binder_##name##_fops = { \ + .owner = THIS_MODULE, \ + .open = binder_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +static int binder_proc_show(struct seq_file *m, void *unused); +BINDER_DEBUG_ENTRY(proc); + +/* This is only defined in include/asm-arm/sizes.h */ +#ifndef SZ_1K +#define SZ_1K 0x400 +#endif + +#ifndef SZ_4M +#define SZ_4M 0x400000 +#endif + +#define FORBIDDEN_MMAP_FLAGS (VM_WRITE) + +#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) + +enum { + BINDER_DEBUG_USER_ERROR = 1U << 0, + BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, + BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, + BINDER_DEBUG_OPEN_CLOSE = 1U << 3, + BINDER_DEBUG_DEAD_BINDER = 1U << 4, + BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, + BINDER_DEBUG_READ_WRITE = 1U << 6, + BINDER_DEBUG_USER_REFS = 1U << 7, + BINDER_DEBUG_THREADS = 1U << 8, + BINDER_DEBUG_TRANSACTION = 1U << 9, + BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, + BINDER_DEBUG_FREE_BUFFER = 1U << 11, + BINDER_DEBUG_INTERNAL_REFS = 1U << 12, + BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, + BINDER_DEBUG_PRIORITY_CAP = 1U << 14, + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, +}; +static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | + BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; +module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); + +static int binder_debug_no_lock; +module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); + +static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); +static int binder_stop_on_user_error; + +static int binder_set_stop_on_user_error(const char *val, + struct kernel_param *kp) +{ + int ret; + ret = param_set_int(val, kp); + if (binder_stop_on_user_error < 2) + wake_up(&binder_user_error_wait); + 
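/*
 * Module-parameter hook: binder_user_error() below raises
 * binder_stop_on_user_error to 2 when stop-on-error is enabled; writing
 * a smaller value back through this parameter wakes anything parked on
 * binder_user_error_wait so stalled callers can proceed.
 */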
return ret; +} +module_param_call(stop_on_user_error, binder_set_stop_on_user_error, + param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + +#define binder_debug(mask, x...) \ + do { \ + if (binder_debug_mask & mask) \ + printk(KERN_INFO x); \ + } while (0) + +#define binder_user_error(x...) \ + do { \ + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ + printk(KERN_INFO x); \ + if (binder_stop_on_user_error) \ + binder_stop_on_user_error = 2; \ + } while (0) + +enum binder_stat_types { + BINDER_STAT_PROC, + BINDER_STAT_THREAD, + BINDER_STAT_NODE, + BINDER_STAT_REF, + BINDER_STAT_DEATH, + BINDER_STAT_TRANSACTION, + BINDER_STAT_TRANSACTION_COMPLETE, + BINDER_STAT_COUNT +}; + +struct binder_stats { + int br[_IOC_NR(BR_FAILED_REPLY) + 1]; + int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; + int obj_created[BINDER_STAT_COUNT]; + int obj_deleted[BINDER_STAT_COUNT]; +}; + +static struct binder_stats binder_stats; + +static inline void binder_stats_deleted(enum binder_stat_types type) +{ + binder_stats.obj_deleted[type]++; +} + +static inline void binder_stats_created(enum binder_stat_types type) +{ + binder_stats.obj_created[type]++; +} + +struct binder_transaction_log_entry { + int debug_id; + int call_type; + int from_proc; + int from_thread; + int target_handle; + int to_proc; + int to_thread; + int to_node; + int data_size; + int offsets_size; +}; +struct binder_transaction_log { + int next; + int full; + struct binder_transaction_log_entry entry[32]; +}; +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; + +static struct binder_transaction_log_entry *binder_transaction_log_add( + struct binder_transaction_log *log) +{ + struct binder_transaction_log_entry *e; + e = &log->entry[log->next]; + memset(e, 0, sizeof(*e)); + log->next++; + if (log->next == ARRAY_SIZE(log->entry)) { + log->next = 0; + log->full = 1; + } + return e; +} + +struct binder_work { + struct list_head entry; + enum { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_NODE, + BINDER_WORK_DEAD_BINDER, + BINDER_WORK_DEAD_BINDER_AND_CLEAR, + BINDER_WORK_CLEAR_DEATH_NOTIFICATION, + } type; +}; + +struct binder_node { + int debug_id; + struct binder_work work; + union { + struct rb_node rb_node; + struct hlist_node dead_node; + }; + struct binder_proc *proc; + struct hlist_head refs; + int internal_strong_refs; + int local_weak_refs; + int local_strong_refs; + void __user *ptr; + void __user *cookie; + unsigned has_strong_ref:1; + unsigned pending_strong_ref:1; + unsigned has_weak_ref:1; + unsigned pending_weak_ref:1; + unsigned has_async_transaction:1; + unsigned accept_fds:1; + unsigned min_priority:8; + struct list_head async_todo; +}; + +struct binder_ref_death { + struct binder_work work; + void __user *cookie; +}; + +struct binder_ref { + /* Lookups needed: */ + /* node + proc => ref (transaction) */ + /* desc + proc => ref (transaction, inc/dec ref) */ + /* node => refs + procs (proc exit) */ + int debug_id; + struct rb_node rb_node_desc; + struct rb_node rb_node_node; + struct hlist_node node_entry; + struct binder_proc *proc; + struct binder_node *node; + uint32_t desc; + int strong; + int weak; + struct binder_ref_death *death; +}; + +struct binder_buffer { + struct list_head entry; /* free and allocated entries by addesss */ + struct rb_node rb_node; /* free entry by size or allocated entry */ + /* by address */ + unsigned free:1; + unsigned allow_user_free:1; + unsigned async_transaction:1; + unsigned 
debug_id:29; + + struct binder_transaction *transaction; + + struct binder_node *target_node; + size_t data_size; + size_t offsets_size; + uint8_t data[0]; +}; + +enum binder_deferred_state { + BINDER_DEFERRED_PUT_FILES = 0x01, + BINDER_DEFERRED_FLUSH = 0x02, + BINDER_DEFERRED_RELEASE = 0x04, +}; + +struct binder_proc { + struct hlist_node proc_node; + struct rb_root threads; + struct rb_root nodes; + struct rb_root refs_by_desc; + struct rb_root refs_by_node; + int pid; + struct vm_area_struct *vma; + struct task_struct *tsk; + struct files_struct *files; + struct hlist_node deferred_work_node; + int deferred_work; + void *buffer; + ptrdiff_t user_buffer_offset; + + struct list_head buffers; + struct rb_root free_buffers; + struct rb_root allocated_buffers; + size_t free_async_space; + + struct page **pages; + size_t buffer_size; + uint32_t buffer_free; + struct list_head todo; + wait_queue_head_t wait; + struct binder_stats stats; + struct list_head delivered_death; + int max_threads; + int requested_threads; + int requested_threads_started; + int ready_threads; + long default_priority; + struct dentry *debugfs_entry; +}; + +enum { + BINDER_LOOPER_STATE_REGISTERED = 0x01, + BINDER_LOOPER_STATE_ENTERED = 0x02, + BINDER_LOOPER_STATE_EXITED = 0x04, + BINDER_LOOPER_STATE_INVALID = 0x08, + BINDER_LOOPER_STATE_WAITING = 0x10, + BINDER_LOOPER_STATE_NEED_RETURN = 0x20 +}; + +struct binder_thread { + struct binder_proc *proc; + struct rb_node rb_node; + int pid; + int looper; + struct binder_transaction *transaction_stack; + struct list_head todo; + uint32_t return_error; /* Write failed, return error code in read buf */ + uint32_t return_error2; /* Write failed, return error code in read */ + /* buffer. Used when sending a reply to a dead process that */ + /* we are also waiting on */ + wait_queue_head_t wait; + struct binder_stats stats; +}; + +struct binder_transaction { + int debug_id; + struct binder_work work; + struct binder_thread *from; + struct binder_transaction *from_parent; + struct binder_proc *to_proc; + struct binder_thread *to_thread; + struct binder_transaction *to_parent; + unsigned need_reply:1; + /* unsigned is_dead:1; */ /* not used at the moment */ + + struct binder_buffer *buffer; + unsigned int code; + unsigned int flags; + long priority; + long saved_priority; + uid_t sender_euid; +}; + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); + +/* + * copied from get_unused_fd_flags + */ +int task_get_unused_fd_flags(struct binder_proc *proc, int flags) +{ + struct files_struct *files = proc->files; + int fd, error; + struct fdtable *fdt; + unsigned long rlim_cur; + unsigned long irqs; + + if (files == NULL) + return -ESRCH; + + error = -EMFILE; + spin_lock(&files->file_lock); + +repeat: + fdt = files_fdtable(files); + fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, + files->next_fd); + + /* + * N.B. For clone tasks sharing a files structure, this test + * will limit the total number of files that can be opened. + */ + rlim_cur = 0; + if (lock_task_sighand(proc->tsk, &irqs)) { + rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; + unlock_task_sighand(proc->tsk, &irqs); + } + if (fd >= rlim_cur) + goto out; + + /* Do we need to expand the fd array or fd set? */ + error = expand_files(files, fd); + if (error < 0) + goto out; + + if (error) { + /* + * If we needed to expand the fs array we + * might have blocked - try again. 
+ */ + error = -EMFILE; + goto repeat; + } + + FD_SET(fd, fdt->open_fds); + if (flags & O_CLOEXEC) + FD_SET(fd, fdt->close_on_exec); + else + FD_CLR(fd, fdt->close_on_exec); + files->next_fd = fd + 1; +#if 1 + /* Sanity check */ + if (fdt->fd[fd] != NULL) { + printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); + fdt->fd[fd] = NULL; + } +#endif + error = fd; + +out: + spin_unlock(&files->file_lock); + return error; +} + +/* + * copied from fd_install + */ +static void task_fd_install( + struct binder_proc *proc, unsigned int fd, struct file *file) +{ + struct files_struct *files = proc->files; + struct fdtable *fdt; + + if (files == NULL) + return; + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + BUG_ON(fdt->fd[fd] != NULL); + rcu_assign_pointer(fdt->fd[fd], file); + spin_unlock(&files->file_lock); +} + +/* + * copied from __put_unused_fd in open.c + */ +static void __put_unused_fd(struct files_struct *files, unsigned int fd) +{ + struct fdtable *fdt = files_fdtable(files); + __FD_CLR(fd, fdt->open_fds); + if (fd < files->next_fd) + files->next_fd = fd; +} + +/* + * copied from sys_close + */ +static long task_close_fd(struct binder_proc *proc, unsigned int fd) +{ + struct file *filp; + struct files_struct *files = proc->files; + struct fdtable *fdt; + int retval; + + if (files == NULL) + return -ESRCH; + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + if (fd >= fdt->max_fds) + goto out_unlock; + filp = fdt->fd[fd]; + if (!filp) + goto out_unlock; + rcu_assign_pointer(fdt->fd[fd], NULL); + FD_CLR(fd, fdt->close_on_exec); + __put_unused_fd(files, fd); + spin_unlock(&files->file_lock); + retval = filp_close(filp, files); + + /* can't restart close syscall because file table entry was cleared */ + if (unlikely(retval == -ERESTARTSYS || + retval == -ERESTARTNOINTR || + retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK)) + retval = -EINTR; + + return retval; + +out_unlock: + spin_unlock(&files->file_lock); + return -EBADF; +} + +static void binder_set_nice(long nice) +{ + long min_nice; + if (can_nice(current, nice)) { + set_user_nice(current, nice); + return; + } + min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "binder: %d: nice value %ld not allowed use " + "%ld instead\n", current->pid, nice, min_nice); + set_user_nice(current, min_nice); + if (min_nice < 20) + return; + binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); +} + +static size_t binder_buffer_size(struct binder_proc *proc, + struct binder_buffer *buffer) +{ + if (list_is_last(&buffer->entry, &proc->buffers)) + return proc->buffer + proc->buffer_size - (void *)buffer->data; + else + return (size_t)list_entry(buffer->entry.next, + struct binder_buffer, entry) - (size_t)buffer->data; +} + +static void binder_insert_free_buffer(struct binder_proc *proc, + struct binder_buffer *new_buffer) +{ + struct rb_node **p = &proc->free_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + size_t buffer_size; + size_t new_buffer_size; + + BUG_ON(!new_buffer->free); + + new_buffer_size = binder_buffer_size(proc, new_buffer); + + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: add free buffer, size %zd, " + "at %p\n", proc->pid, new_buffer_size, new_buffer); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + + buffer_size = binder_buffer_size(proc, buffer); + + if (new_buffer_size < buffer_size) + p = 
&parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); +} + +static void binder_insert_allocated_buffer(struct binder_proc *proc, + struct binder_buffer *new_buffer) +{ + struct rb_node **p = &proc->allocated_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + + BUG_ON(new_buffer->free); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (new_buffer < buffer) + p = &parent->rb_left; + else if (new_buffer > buffer) + p = &parent->rb_right; + else + BUG(); + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); +} + +static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, + void __user *user_ptr) +{ + struct rb_node *n = proc->allocated_buffers.rb_node; + struct binder_buffer *buffer; + struct binder_buffer *kern_ptr; + + kern_ptr = user_ptr - proc->user_buffer_offset + - offsetof(struct binder_buffer, data); + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (kern_ptr < buffer) + n = n->rb_left; + else if (kern_ptr > buffer) + n = n->rb_right; + else + return buffer; + } + return NULL; +} + +static int binder_update_page_range(struct binder_proc *proc, int allocate, + void *start, void *end, + struct vm_area_struct *vma) +{ + void *page_addr; + unsigned long user_page_addr; + struct vm_struct tmp_area; + struct page **page; + struct mm_struct *mm; + + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: %s pages %p-%p\n", proc->pid, + allocate ? "allocate" : "free", start, end); + + if (end <= start) + return 0; + + if (vma) + mm = NULL; + else + mm = get_task_mm(proc->tsk); + + if (mm) { + down_write(&mm->mmap_sem); + vma = proc->vma; + } + + if (allocate == 0) + goto free_range; + + if (vma == NULL) { + printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " + "map pages in userspace, no vma\n", proc->pid); + goto err_no_vma; + } + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + int ret; + struct page **page_array_ptr; + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; + + BUG_ON(*page); + *page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (*page == NULL) { + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + "for page at %p\n", proc->pid, page_addr); + goto err_alloc_page_failed; + } + tmp_area.addr = page_addr; + tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
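map_vm_area() appears to subtract one guard page from the vm_struct size, so only a single page is actually mapped here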
*/; + page_array_ptr = page; + ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); + if (ret) { + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + "to map page at %p in kernel\n", + proc->pid, page_addr); + goto err_map_kernel_failed; + } + user_page_addr = + (uintptr_t)page_addr + proc->user_buffer_offset; + ret = vm_insert_page(vma, user_page_addr, page[0]); + if (ret) { + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + "to map page at %lx in userspace\n", + proc->pid, user_page_addr); + goto err_vm_insert_page_failed; + } + /* vm_insert_page does not seem to increment the refcount */ + } + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return 0; + +free_range: + for (page_addr = end - PAGE_SIZE; page_addr >= start; + page_addr -= PAGE_SIZE) { + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; + if (vma) + zap_page_range(vma, (uintptr_t)page_addr + + proc->user_buffer_offset, PAGE_SIZE, NULL); +err_vm_insert_page_failed: + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +err_map_kernel_failed: + __free_page(*page); + *page = NULL; +err_alloc_page_failed: + ; + } +err_no_vma: + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return -ENOMEM; +} + +static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, + size_t data_size, + size_t offsets_size, int is_async) +{ + struct rb_node *n = proc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t buffer_size; + struct rb_node *best_fit = NULL; + void *has_page_addr; + void *end_page_addr; + size_t size; + + if (proc->vma == NULL) { + printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", + proc->pid); + return NULL; + } + + size = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + + if (size < data_size || size < offsets_size) { + binder_user_error("binder: %d: got transaction with invalid " + "size %zd-%zd\n", proc->pid, data_size, offsets_size); + return NULL; + } + + if (is_async && + proc->free_async_space < size + sizeof(struct binder_buffer)) { + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: binder_alloc_buf size %zd" + "failed, no async space left\n", proc->pid, size); + return NULL; + } + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + buffer_size = binder_buffer_size(proc, buffer); + + if (size < buffer_size) { + best_fit = n; + n = n->rb_left; + } else if (size > buffer_size) + n = n->rb_right; + else { + best_fit = n; + break; + } + } + if (best_fit == NULL) { + printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, " + "no address space\n", proc->pid, size); + return NULL; + } + if (n == NULL) { + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); + buffer_size = binder_buffer_size(proc, buffer); + } + + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: binder_alloc_buf size %zd got buff" + "er %p size %zd\n", proc->pid, size, buffer, buffer_size); + + has_page_addr = + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + if (n == NULL) { + if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) + buffer_size = size; /* no room for other buffers */ + else + buffer_size = size + sizeof(struct binder_buffer); + } + end_page_addr = + (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); + if (end_page_addr > has_page_addr) + end_page_addr = has_page_addr; + if (binder_update_page_range(proc, 1, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) + return NULL; + + rb_erase(best_fit, &proc->free_buffers); + 
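/* Mark the carved-out buffer allocated; if the best fit was larger than the request, the remainder is split off below and re-inserted as a free buffer. */ +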
buffer->free = 0; + binder_insert_allocated_buffer(proc, buffer); + if (buffer_size != size) { + struct binder_buffer *new_buffer = (void *)buffer->data + size; + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(proc, new_buffer); + } + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: binder_alloc_buf size %zd got " + "%p\n", proc->pid, size, buffer); + buffer->data_size = data_size; + buffer->offsets_size = offsets_size; + buffer->async_transaction = is_async; + if (is_async) { + proc->free_async_space -= size + sizeof(struct binder_buffer); + binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "binder: %d: binder_alloc_buf size %zd " + "async free %zd\n", proc->pid, size, + proc->free_async_space); + } + + return buffer; +} + +static void *buffer_start_page(struct binder_buffer *buffer) +{ + return (void *)((uintptr_t)buffer & PAGE_MASK); +} + +static void *buffer_end_page(struct binder_buffer *buffer) +{ + return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); +} + +static void binder_delete_free_buffer(struct binder_proc *proc, + struct binder_buffer *buffer) +{ + struct binder_buffer *prev, *next = NULL; + int free_page_end = 1; + int free_page_start = 1; + + BUG_ON(proc->buffers.next == &buffer->entry); + prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); + BUG_ON(!prev->free); + if (buffer_end_page(prev) == buffer_start_page(buffer)) { + free_page_start = 0; + if (buffer_end_page(prev) == buffer_end_page(buffer)) + free_page_end = 0; + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: merge free, buffer %p " + "share page with %p\n", proc->pid, buffer, prev); + } + + if (!list_is_last(&buffer->entry, &proc->buffers)) { + next = list_entry(buffer->entry.next, + struct binder_buffer, entry); + if (buffer_start_page(next) == buffer_end_page(buffer)) { + free_page_end = 0; + if (buffer_start_page(next) == + buffer_start_page(buffer)) + free_page_start = 0; + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: merge free, buffer" + " %p share page with %p\n", proc->pid, + buffer, prev); + } + } + list_del(&buffer->entry); + if (free_page_start || free_page_end) { + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: merge free, buffer %p do " + "not share page%s%s with with %p or %p\n", + proc->pid, buffer, free_page_start ? "" : " end", + free_page_end ? "" : " start", prev, next); + binder_update_page_range(proc, 0, free_page_start ? + buffer_start_page(buffer) : buffer_end_page(buffer), + (free_page_end ? 
buffer_end_page(buffer) : + buffer_start_page(buffer)) + PAGE_SIZE, NULL); + } +} + +static void binder_free_buf(struct binder_proc *proc, + struct binder_buffer *buffer) +{ + size_t size, buffer_size; + + buffer_size = binder_buffer_size(proc, buffer); + + size = ALIGN(buffer->data_size, sizeof(void *)) + + ALIGN(buffer->offsets_size, sizeof(void *)); + + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder: %d: binder_free_buf %p size %zd buffer" + "_size %zd\n", proc->pid, buffer, size, buffer_size); + + BUG_ON(buffer->free); + BUG_ON(size > buffer_size); + BUG_ON(buffer->transaction != NULL); + BUG_ON((void *)buffer < proc->buffer); + BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); + + if (buffer->async_transaction) { + proc->free_async_space += size + sizeof(struct binder_buffer); + + binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "binder: %d: binder_free_buf size %zd " + "async free %zd\n", proc->pid, size, + proc->free_async_space); + } + + binder_update_page_range(proc, 0, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); + rb_erase(&buffer->rb_node, &proc->allocated_buffers); + buffer->free = 1; + if (!list_is_last(&buffer->entry, &proc->buffers)) { + struct binder_buffer *next = list_entry(buffer->entry.next, + struct binder_buffer, entry); + if (next->free) { + rb_erase(&next->rb_node, &proc->free_buffers); + binder_delete_free_buffer(proc, next); + } + } + if (proc->buffers.next != &buffer->entry) { + struct binder_buffer *prev = list_entry(buffer->entry.prev, + struct binder_buffer, entry); + if (prev->free) { + binder_delete_free_buffer(proc, buffer); + rb_erase(&prev->rb_node, &proc->free_buffers); + buffer = prev; + } + } + binder_insert_free_buffer(proc, buffer); +} + +static struct binder_node *binder_get_node(struct binder_proc *proc, + void __user *ptr) +{ + struct rb_node *n = proc->nodes.rb_node; + struct binder_node *node; + + while (n) { + node = rb_entry(n, struct binder_node, rb_node); + + if (ptr < node->ptr) + n = n->rb_left; + else if (ptr > node->ptr) + n = n->rb_right; + else + return node; + } + return NULL; +} + +static struct binder_node *binder_new_node(struct binder_proc *proc, + void __user *ptr, + void __user *cookie) +{ + struct rb_node **p = &proc->nodes.rb_node; + struct rb_node *parent = NULL; + struct binder_node *node; + + while (*p) { + parent = *p; + node = rb_entry(parent, struct binder_node, rb_node); + + if (ptr < node->ptr) + p = &(*p)->rb_left; + else if (ptr > node->ptr) + p = &(*p)->rb_right; + else + return NULL; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (node == NULL) + return NULL; + binder_stats_created(BINDER_STAT_NODE); + rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, &proc->nodes); + node->debug_id = ++binder_last_id; + node->proc = proc; + node->ptr = ptr; + node->cookie = cookie; + node->work.type = BINDER_WORK_NODE; + INIT_LIST_HEAD(&node->work.entry); + INIT_LIST_HEAD(&node->async_todo); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d:%d node %d u%p c%p created\n", + proc->pid, current->pid, node->debug_id, + node->ptr, node->cookie); + return node; +} + +static int binder_inc_node(struct binder_node *node, int strong, int internal, + struct list_head *target_list) +{ + if (strong) { + if (internal) { + if (target_list == NULL && + node->internal_strong_refs == 0 && + !(node == binder_context_mgr_node && + node->has_strong_ref)) { + printk(KERN_ERR "binder: invalid inc strong " + "node for %d\n", 
node->debug_id); + return -EINVAL; + } + node->internal_strong_refs++; + } else + node->local_strong_refs++; + if (!node->has_strong_ref && target_list) { + list_del_init(&node->work.entry); + list_add_tail(&node->work.entry, target_list); + } + } else { + if (!internal) + node->local_weak_refs++; + if (!node->has_weak_ref && list_empty(&node->work.entry)) { + if (target_list == NULL) { + printk(KERN_ERR "binder: invalid inc weak node " + "for %d\n", node->debug_id); + return -EINVAL; + } + list_add_tail(&node->work.entry, target_list); + } + } + return 0; +} + +static int binder_dec_node(struct binder_node *node, int strong, int internal) +{ + if (strong) { + if (internal) + node->internal_strong_refs--; + else + node->local_strong_refs--; + if (node->local_strong_refs || node->internal_strong_refs) + return 0; + } else { + if (!internal) + node->local_weak_refs--; + if (node->local_weak_refs || !hlist_empty(&node->refs)) + return 0; + } + if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { + if (list_empty(&node->work.entry)) { + list_add_tail(&node->work.entry, &node->proc->todo); + wake_up_interruptible(&node->proc->wait); + } + } else { + if (hlist_empty(&node->refs) && !node->local_strong_refs && + !node->local_weak_refs) { + list_del_init(&node->work.entry); + if (node->proc) { + rb_erase(&node->rb_node, &node->proc->nodes); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: refless node %d deleted\n", + node->debug_id); + } else { + hlist_del(&node->dead_node); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: dead node %d deleted\n", + node->debug_id); + } + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); + } + } + + return 0; +} + + +static struct binder_ref *binder_get_ref(struct binder_proc *proc, + uint32_t desc) +{ + struct rb_node *n = proc->refs_by_desc.rb_node; + struct binder_ref *ref; + + while (n) { + ref = rb_entry(n, struct binder_ref, rb_node_desc); + + if (desc < ref->desc) + n = n->rb_left; + else if (desc > ref->desc) + n = n->rb_right; + else + return ref; + } + return NULL; +} + +static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, + struct binder_node *node) +{ + struct rb_node *n; + struct rb_node **p = &proc->refs_by_node.rb_node; + struct rb_node *parent = NULL; + struct binder_ref *ref, *new_ref; + + while (*p) { + parent = *p; + ref = rb_entry(parent, struct binder_ref, rb_node_node); + + if (node < ref->node) + p = &(*p)->rb_left; + else if (node > ref->node) + p = &(*p)->rb_right; + else + return ref; + } + new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (new_ref == NULL) + return NULL; + binder_stats_created(BINDER_STAT_REF); + new_ref->debug_id = ++binder_last_id; + new_ref->proc = proc; + new_ref->node = node; + rb_link_node(&new_ref->rb_node_node, parent, p); + rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); + + new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { + ref = rb_entry(n, struct binder_ref, rb_node_desc); + if (ref->desc > new_ref->desc) + break; + new_ref->desc = ref->desc + 1; + } + + p = &proc->refs_by_desc.rb_node; + while (*p) { + parent = *p; + ref = rb_entry(parent, struct binder_ref, rb_node_desc); + + if (new_ref->desc < ref->desc) + p = &(*p)->rb_left; + else if (new_ref->desc > ref->desc) + p = &(*p)->rb_right; + else + BUG(); + } + rb_link_node(&new_ref->rb_node_desc, parent, p); + rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); + if (node) { + hlist_add_head(&new_ref->node_entry, &node->refs); + + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d new ref %d desc %d for " + "node %d\n", proc->pid, new_ref->debug_id, + new_ref->desc, node->debug_id); + } else { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d new ref %d desc %d for " + "dead node\n", proc->pid, new_ref->debug_id, + new_ref->desc); + } + return new_ref; +} + +static void binder_delete_ref(struct binder_ref *ref) +{ + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d delete ref %d desc %d for " + "node %d\n", ref->proc->pid, ref->debug_id, + ref->desc, ref->node->debug_id); + + rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); + rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); + if (ref->strong) + binder_dec_node(ref->node, 1, 1); + hlist_del(&ref->node_entry); + binder_dec_node(ref->node, 0, 1); + if (ref->death) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder: %d delete ref %d desc %d " + "has death notification\n", ref->proc->pid, + ref->debug_id, ref->desc); + list_del(&ref->death->work.entry); + kfree(ref->death); + binder_stats_deleted(BINDER_STAT_DEATH); + } + kfree(ref); + binder_stats_deleted(BINDER_STAT_REF); +} + +static int binder_inc_ref(struct binder_ref *ref, int strong, + struct list_head *target_list) +{ + int ret; + if (strong) { + if (ref->strong == 0) { + ret = binder_inc_node(ref->node, 1, 1, target_list); + if (ret) + return ret; + } + ref->strong++; + } else { + if (ref->weak == 0) { + ret = binder_inc_node(ref->node, 0, 1, target_list); + if (ret) + return ret; + } + ref->weak++; + } + return 0; +} + + +static int binder_dec_ref(struct binder_ref *ref, int strong) +{ + if (strong) { + if (ref->strong == 0) { + binder_user_error("binder: %d invalid dec strong, " + "ref %d desc %d s %d w %d\n", + ref->proc->pid, ref->debug_id, + ref->desc, ref->strong, ref->weak); + return -EINVAL; + } + ref->strong--; + if (ref->strong == 0) { + int ret; + ret = binder_dec_node(ref->node, strong, 1); + if (ret) + return ret; + } + } else { + if (ref->weak == 0) { + binder_user_error("binder: %d invalid dec weak, " + "ref %d desc %d s %d w %d\n", + ref->proc->pid, ref->debug_id, + ref->desc, ref->strong, ref->weak); + return -EINVAL; + } + ref->weak--; + } + if (ref->strong == 0 && ref->weak == 0) + binder_delete_ref(ref); + return 0; +} + +static void binder_pop_transaction(struct binder_thread *target_thread, + struct binder_transaction *t) +{ + if (target_thread) { + BUG_ON(target_thread->transaction_stack != t); + BUG_ON(target_thread->transaction_stack->from != target_thread); + target_thread->transaction_stack = + target_thread->transaction_stack->from_parent; + t->from = NULL; + } + t->need_reply = 0; + if (t->buffer) + t->buffer->transaction = NULL; + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); +} + +static void binder_send_failed_reply(struct binder_transaction *t, + uint32_t error_code) +{ + struct 
binder_thread *target_thread; + BUG_ON(t->flags & TF_ONE_WAY); + while (1) { + target_thread = t->from; + if (target_thread) { + if (target_thread->return_error != BR_OK && + target_thread->return_error2 == BR_OK) { + target_thread->return_error2 = + target_thread->return_error; + target_thread->return_error = BR_OK; + } + if (target_thread->return_error == BR_OK) { + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "binder: send failed reply for " + "transaction %d to %d:%d\n", + t->debug_id, target_thread->proc->pid, + target_thread->pid); + + binder_pop_transaction(target_thread, t); + target_thread->return_error = error_code; + wake_up_interruptible(&target_thread->wait); + } else { + printk(KERN_ERR "binder: reply failed, target " + "thread, %d:%d, has error code %d " + "already\n", target_thread->proc->pid, + target_thread->pid, + target_thread->return_error); + } + return; + } else { + struct binder_transaction *next = t->from_parent; + + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "binder: send failed reply " + "for transaction %d, target dead\n", + t->debug_id); + + binder_pop_transaction(target_thread, t); + if (next == NULL) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder: reply failed," + " no target thread at root\n"); + return; + } + t = next; + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder: reply failed, no target " + "thread -- retry %d\n", t->debug_id); + } + } +} + +static void binder_transaction_buffer_release(struct binder_proc *proc, + struct binder_buffer *buffer, + size_t *failed_at) +{ + size_t *offp, *off_end; + int debug_id = buffer->debug_id; + + binder_debug(BINDER_DEBUG_TRANSACTION, + "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", + proc->pid, buffer->debug_id, + buffer->data_size, buffer->offsets_size, failed_at); + + if (buffer->target_node) + binder_dec_node(buffer->target_node, 1, 0); + + offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); + if (failed_at) + off_end = failed_at; + else + off_end = (void *)offp + buffer->offsets_size; + for (; offp < off_end; offp++) { + struct flat_binder_object *fp; + if (*offp > buffer->data_size - sizeof(*fp) || + buffer->data_size < sizeof(*fp) || + !IS_ALIGNED(*offp, sizeof(void *))) { + printk(KERN_ERR "binder: transaction release %d bad" + "offset %zd, size %zd\n", debug_id, + *offp, buffer->data_size); + continue; + } + fp = (struct flat_binder_object *)(buffer->data + *offp); + switch (fp->type) { + case BINDER_TYPE_BINDER: + case BINDER_TYPE_WEAK_BINDER: { + struct binder_node *node = binder_get_node(proc, fp->binder); + if (node == NULL) { + printk(KERN_ERR "binder: transaction release %d" + " bad node %p\n", debug_id, fp->binder); + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + " node %d u%p\n", + node->debug_id, node->ptr); + binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); + } break; + case BINDER_TYPE_HANDLE: + case BINDER_TYPE_WEAK_HANDLE: { + struct binder_ref *ref = binder_get_ref(proc, fp->handle); + if (ref == NULL) { + printk(KERN_ERR "binder: transaction release %d" + " bad handle %ld\n", debug_id, + fp->handle); + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d (node %d)\n", + ref->debug_id, ref->desc, ref->node->debug_id); + binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); + } break; + + case BINDER_TYPE_FD: + binder_debug(BINDER_DEBUG_TRANSACTION, + " fd %ld\n", fp->handle); + if (failed_at) + task_close_fd(proc, fp->handle); + break; + + default: + printk(KERN_ERR "binder: transaction release %d bad " + 
"object type %lx\n", debug_id, fp->type); + break; + } + } +} + +static void binder_transaction(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_transaction_data *tr, int reply) +{ + struct binder_transaction *t; + struct binder_work *tcomplete; + size_t *offp, *off_end; + struct binder_proc *target_proc; + struct binder_thread *target_thread = NULL; + struct binder_node *target_node = NULL; + struct list_head *target_list; + wait_queue_head_t *target_wait; + struct binder_transaction *in_reply_to = NULL; + struct binder_transaction_log_entry *e; + uint32_t return_error; + + e = binder_transaction_log_add(&binder_transaction_log); + e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); + e->from_proc = proc->pid; + e->from_thread = thread->pid; + e->target_handle = tr->target.handle; + e->data_size = tr->data_size; + e->offsets_size = tr->offsets_size; + + if (reply) { + in_reply_to = thread->transaction_stack; + if (in_reply_to == NULL) { + binder_user_error("binder: %d:%d got reply transaction " + "with no transaction stack\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + goto err_empty_call_stack; + } + binder_set_nice(in_reply_to->saved_priority); + if (in_reply_to->to_thread != thread) { + binder_user_error("binder: %d:%d got reply transaction " + "with bad transaction stack," + " transaction %d has target %d:%d\n", + proc->pid, thread->pid, in_reply_to->debug_id, + in_reply_to->to_proc ? + in_reply_to->to_proc->pid : 0, + in_reply_to->to_thread ? + in_reply_to->to_thread->pid : 0); + return_error = BR_FAILED_REPLY; + in_reply_to = NULL; + goto err_bad_call_stack; + } + thread->transaction_stack = in_reply_to->to_parent; + target_thread = in_reply_to->from; + if (target_thread == NULL) { + return_error = BR_DEAD_REPLY; + goto err_dead_binder; + } + if (target_thread->transaction_stack != in_reply_to) { + binder_user_error("binder: %d:%d got reply transaction " + "with bad target transaction stack %d, " + "expected %d\n", + proc->pid, thread->pid, + target_thread->transaction_stack ? + target_thread->transaction_stack->debug_id : 0, + in_reply_to->debug_id); + return_error = BR_FAILED_REPLY; + in_reply_to = NULL; + target_thread = NULL; + goto err_dead_binder; + } + target_proc = target_thread->proc; + } else { + if (tr->target.handle) { + struct binder_ref *ref; + ref = binder_get_ref(proc, tr->target.handle); + if (ref == NULL) { + binder_user_error("binder: %d:%d got " + "transaction to invalid handle\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + goto err_invalid_target_handle; + } + target_node = ref->node; + } else { + target_node = binder_context_mgr_node; + if (target_node == NULL) { + return_error = BR_DEAD_REPLY; + goto err_no_context_mgr_node; + } + } + e->to_node = target_node->debug_id; + target_proc = target_node->proc; + if (target_proc == NULL) { + return_error = BR_DEAD_REPLY; + goto err_dead_binder; + } + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { + struct binder_transaction *tmp; + tmp = thread->transaction_stack; + if (tmp->to_thread != thread) { + binder_user_error("binder: %d:%d got new " + "transaction with bad transaction stack" + ", transaction %d has target %d:%d\n", + proc->pid, thread->pid, tmp->debug_id, + tmp->to_proc ? tmp->to_proc->pid : 0, + tmp->to_thread ? 
+ tmp->to_thread->pid : 0); + return_error = BR_FAILED_REPLY; + goto err_bad_call_stack; + } + while (tmp) { + if (tmp->from && tmp->from->proc == target_proc) + target_thread = tmp->from; + tmp = tmp->from_parent; + } + } + } + if (target_thread) { + e->to_thread = target_thread->pid; + target_list = &target_thread->todo; + target_wait = &target_thread->wait; + } else { + target_list = &target_proc->todo; + target_wait = &target_proc->wait; + } + e->to_proc = target_proc->pid; + + /* TODO: reuse incoming transaction for reply */ + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (t == NULL) { + return_error = BR_FAILED_REPLY; + goto err_alloc_t_failed; + } + binder_stats_created(BINDER_STAT_TRANSACTION); + + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); + if (tcomplete == NULL) { + return_error = BR_FAILED_REPLY; + goto err_alloc_tcomplete_failed; + } + binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); + + t->debug_id = ++binder_last_id; + e->debug_id = t->debug_id; + + if (reply) + binder_debug(BINDER_DEBUG_TRANSACTION, + "binder: %d:%d BC_REPLY %d -> %d:%d, " + "data %p-%p size %zd-%zd\n", + proc->pid, thread->pid, t->debug_id, + target_proc->pid, target_thread->pid, + tr->data.ptr.buffer, tr->data.ptr.offsets, + tr->data_size, tr->offsets_size); + else + binder_debug(BINDER_DEBUG_TRANSACTION, + "binder: %d:%d BC_TRANSACTION %d -> " + "%d - node %d, data %p-%p size %zd-%zd\n", + proc->pid, thread->pid, t->debug_id, + target_proc->pid, target_node->debug_id, + tr->data.ptr.buffer, tr->data.ptr.offsets, + tr->data_size, tr->offsets_size); + + if (!reply && !(tr->flags & TF_ONE_WAY)) + t->from = thread; + else + t->from = NULL; + t->sender_euid = proc->tsk->cred->euid; + t->to_proc = target_proc; + t->to_thread = target_thread; + t->code = tr->code; + t->flags = tr->flags; + t->priority = task_nice(current); + t->buffer = binder_alloc_buf(target_proc, tr->data_size, + tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); + if (t->buffer == NULL) { + return_error = BR_FAILED_REPLY; + goto err_binder_alloc_buf_failed; + } + t->buffer->allow_user_free = 0; + t->buffer->debug_id = t->debug_id; + t->buffer->transaction = t; + t->buffer->target_node = target_node; + if (target_node) + binder_inc_node(target_node, 1, 0, NULL); + + offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); + + if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { + binder_user_error("binder: %d:%d got transaction with invalid " + "data ptr\n", proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + goto err_copy_data_failed; + } + if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { + binder_user_error("binder: %d:%d got transaction with invalid " + "offsets ptr\n", proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + goto err_copy_data_failed; + } + if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { + binder_user_error("binder: %d:%d got transaction with " + "invalid offsets size, %zd\n", + proc->pid, thread->pid, tr->offsets_size); + return_error = BR_FAILED_REPLY; + goto err_bad_offset; + } + off_end = (void *)offp + tr->offsets_size; + for (; offp < off_end; offp++) { + struct flat_binder_object *fp; + if (*offp > t->buffer->data_size - sizeof(*fp) || + t->buffer->data_size < sizeof(*fp) || + !IS_ALIGNED(*offp, sizeof(void *))) { + binder_user_error("binder: %d:%d got transaction with " + "invalid offset, %zd\n", + proc->pid, thread->pid, *offp); + return_error = BR_FAILED_REPLY; + goto err_bad_offset; + } + fp = (struct 
flat_binder_object *)(t->buffer->data + *offp); + switch (fp->type) { + case BINDER_TYPE_BINDER: + case BINDER_TYPE_WEAK_BINDER: { + struct binder_ref *ref; + struct binder_node *node = binder_get_node(proc, fp->binder); + if (node == NULL) { + node = binder_new_node(proc, fp->binder, fp->cookie); + if (node == NULL) { + return_error = BR_FAILED_REPLY; + goto err_binder_new_node_failed; + } + node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + } + if (fp->cookie != node->cookie) { + binder_user_error("binder: %d:%d sending u%p " + "node %d, cookie mismatch %p != %p\n", + proc->pid, thread->pid, + fp->binder, node->debug_id, + fp->cookie, node->cookie); + goto err_binder_get_ref_for_node_failed; + } + ref = binder_get_ref_for_node(target_proc, node); + if (ref == NULL) { + return_error = BR_FAILED_REPLY; + goto err_binder_get_ref_for_node_failed; + } + if (fp->type == BINDER_TYPE_BINDER) + fp->type = BINDER_TYPE_HANDLE; + else + fp->type = BINDER_TYPE_WEAK_HANDLE; + fp->handle = ref->desc; + binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, + &thread->todo); + + binder_debug(BINDER_DEBUG_TRANSACTION, + " node %d u%p -> ref %d desc %d\n", + node->debug_id, node->ptr, ref->debug_id, + ref->desc); + } break; + case BINDER_TYPE_HANDLE: + case BINDER_TYPE_WEAK_HANDLE: { + struct binder_ref *ref = binder_get_ref(proc, fp->handle); + if (ref == NULL) { + binder_user_error("binder: %d:%d got " + "transaction with invalid " + "handle, %ld\n", proc->pid, + thread->pid, fp->handle); + return_error = BR_FAILED_REPLY; + goto err_binder_get_ref_failed; + } + if (ref->node->proc == target_proc) { + if (fp->type == BINDER_TYPE_HANDLE) + fp->type = BINDER_TYPE_BINDER; + else + fp->type = BINDER_TYPE_WEAK_BINDER; + fp->binder = ref->node->ptr; + fp->cookie = ref->node->cookie; + binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d -> node %d u%p\n", + ref->debug_id, ref->desc, ref->node->debug_id, + ref->node->ptr); + } else { + struct binder_ref *new_ref; + new_ref = binder_get_ref_for_node(target_proc, ref->node); + if (new_ref == NULL) { + return_error = BR_FAILED_REPLY; + goto err_binder_get_ref_for_node_failed; + } + fp->handle = new_ref->desc; + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d -> ref %d desc %d (node %d)\n", + ref->debug_id, ref->desc, new_ref->debug_id, + new_ref->desc, ref->node->debug_id); + } + } break; + + case BINDER_TYPE_FD: { + int target_fd; + struct file *file; + + if (reply) { + if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { + binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", + proc->pid, thread->pid, fp->handle); + return_error = BR_FAILED_REPLY; + goto err_fd_not_allowed; + } + } else if (!target_node->accept_fds) { + binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", + proc->pid, thread->pid, fp->handle); + return_error = BR_FAILED_REPLY; + goto err_fd_not_allowed; + } + + file = fget(fp->handle); + if (file == NULL) { + binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", + proc->pid, thread->pid, fp->handle); + return_error = BR_FAILED_REPLY; + goto err_fget_failed; + } + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); + if (target_fd < 0) { + fput(file); + return_error = BR_FAILED_REPLY; + goto err_get_unused_fd_failed; + 
} + task_fd_install(target_proc, target_fd, file); + binder_debug(BINDER_DEBUG_TRANSACTION, + " fd %ld -> %d\n", fp->handle, target_fd); + /* TODO: fput? */ + fp->handle = target_fd; + } break; + + default: + binder_user_error("binder: %d:%d got transactio" + "n with invalid object type, %lx\n", + proc->pid, thread->pid, fp->type); + return_error = BR_FAILED_REPLY; + goto err_bad_object_type; + } + } + if (reply) { + BUG_ON(t->buffer->async_transaction != 0); + binder_pop_transaction(target_thread, in_reply_to); + } else if (!(t->flags & TF_ONE_WAY)) { + BUG_ON(t->buffer->async_transaction != 0); + t->need_reply = 1; + t->from_parent = thread->transaction_stack; + thread->transaction_stack = t; + } else { + BUG_ON(target_node == NULL); + BUG_ON(t->buffer->async_transaction != 1); + if (target_node->has_async_transaction) { + target_list = &target_node->async_todo; + target_wait = NULL; + } else + target_node->has_async_transaction = 1; + } + t->work.type = BINDER_WORK_TRANSACTION; + list_add_tail(&t->work.entry, target_list); + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + list_add_tail(&tcomplete->entry, &thread->todo); + if (target_wait) + wake_up_interruptible(target_wait); + return; + +err_get_unused_fd_failed: +err_fget_failed: +err_fd_not_allowed: +err_binder_get_ref_for_node_failed: +err_binder_get_ref_failed: +err_binder_new_node_failed: +err_bad_object_type: +err_bad_offset: +err_copy_data_failed: + binder_transaction_buffer_release(target_proc, t->buffer, offp); + t->buffer->transaction = NULL; + binder_free_buf(target_proc, t->buffer); +err_binder_alloc_buf_failed: + kfree(tcomplete); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +err_alloc_tcomplete_failed: + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); +err_alloc_t_failed: +err_bad_call_stack: +err_empty_call_stack: +err_dead_binder: +err_invalid_target_handle: +err_no_context_mgr_node: + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "binder: %d:%d transaction failed %d, size %zd-%zd\n", + proc->pid, thread->pid, return_error, + tr->data_size, tr->offsets_size); + + { + struct binder_transaction_log_entry *fe; + fe = binder_transaction_log_add(&binder_transaction_log_failed); + *fe = *e; + } + + BUG_ON(thread->return_error != BR_OK); + if (in_reply_to) { + thread->return_error = BR_TRANSACTION_COMPLETE; + binder_send_failed_reply(in_reply_to, return_error); + } else + thread->return_error = return_error; +} + +int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, + void __user *buffer, int size, signed long *consumed) +{ + uint32_t cmd; + void __user *ptr = buffer + *consumed; + void __user *end = buffer + size; + + while (ptr < end && thread->return_error == BR_OK) { + if (get_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { + binder_stats.bc[_IOC_NR(cmd)]++; + proc->stats.bc[_IOC_NR(cmd)]++; + thread->stats.bc[_IOC_NR(cmd)]++; + } + switch (cmd) { + case BC_INCREFS: + case BC_ACQUIRE: + case BC_RELEASE: + case BC_DECREFS: { + uint32_t target; + struct binder_ref *ref; + const char *debug_string; + + if (get_user(target, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (target == 0 && binder_context_mgr_node && + (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { + ref = binder_get_ref_for_node(proc, + binder_context_mgr_node); + if (ref->desc != target) { + binder_user_error("binder: %d:" + "%d tried to acquire " + "reference to desc 0, " + "got %d instead\n", + 
proc->pid, thread->pid, + ref->desc); + } + } else + ref = binder_get_ref(proc, target); + if (ref == NULL) { + binder_user_error("binder: %d:%d refcou" + "nt change on invalid ref %d\n", + proc->pid, thread->pid, target); + break; + } + switch (cmd) { + case BC_INCREFS: + debug_string = "IncRefs"; + binder_inc_ref(ref, 0, NULL); + break; + case BC_ACQUIRE: + debug_string = "Acquire"; + binder_inc_ref(ref, 1, NULL); + break; + case BC_RELEASE: + debug_string = "Release"; + binder_dec_ref(ref, 1); + break; + case BC_DECREFS: + default: + debug_string = "DecRefs"; + binder_dec_ref(ref, 0); + break; + } + binder_debug(BINDER_DEBUG_USER_REFS, + "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", + proc->pid, thread->pid, debug_string, ref->debug_id, + ref->desc, ref->strong, ref->weak, ref->node->debug_id); + break; + } + case BC_INCREFS_DONE: + case BC_ACQUIRE_DONE: { + void __user *node_ptr; + void *cookie; + struct binder_node *node; + + if (get_user(node_ptr, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + if (get_user(cookie, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + node = binder_get_node(proc, node_ptr); + if (node == NULL) { + binder_user_error("binder: %d:%d " + "%s u%p no match\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? + "BC_INCREFS_DONE" : + "BC_ACQUIRE_DONE", + node_ptr); + break; + } + if (cookie != node->cookie) { + binder_user_error("binder: %d:%d %s u%p node %d" + " cookie mismatch %p != %p\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? + "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", + node_ptr, node->debug_id, + cookie, node->cookie); + break; + } + if (cmd == BC_ACQUIRE_DONE) { + if (node->pending_strong_ref == 0) { + binder_user_error("binder: %d:%d " + "BC_ACQUIRE_DONE node %d has " + "no pending acquire request\n", + proc->pid, thread->pid, + node->debug_id); + break; + } + node->pending_strong_ref = 0; + } else { + if (node->pending_weak_ref == 0) { + binder_user_error("binder: %d:%d " + "BC_INCREFS_DONE node %d has " + "no pending increfs request\n", + proc->pid, thread->pid, + node->debug_id); + break; + } + node->pending_weak_ref = 0; + } + binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); + binder_debug(BINDER_DEBUG_USER_REFS, + "binder: %d:%d %s node %d ls %d lw %d\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", + node->debug_id, node->local_strong_refs, node->local_weak_refs); + break; + } + case BC_ATTEMPT_ACQUIRE: + printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); + return -EINVAL; + case BC_ACQUIRE_RESULT: + printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); + return -EINVAL; + + case BC_FREE_BUFFER: { + void __user *data_ptr; + struct binder_buffer *buffer; + + if (get_user(data_ptr, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + + buffer = binder_buffer_lookup(proc, data_ptr); + if (buffer == NULL) { + binder_user_error("binder: %d:%d " + "BC_FREE_BUFFER u%p no match\n", + proc->pid, thread->pid, data_ptr); + break; + } + if (!buffer->allow_user_free) { + binder_user_error("binder: %d:%d " + "BC_FREE_BUFFER u%p matched " + "unreturned buffer\n", + proc->pid, thread->pid, data_ptr); + break; + } + binder_debug(BINDER_DEBUG_FREE_BUFFER, + "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", + proc->pid, thread->pid, data_ptr, buffer->debug_id, + buffer->transaction ? 
"active" : "finished"); + + if (buffer->transaction) { + buffer->transaction->buffer = NULL; + buffer->transaction = NULL; + } + if (buffer->async_transaction && buffer->target_node) { + BUG_ON(!buffer->target_node->has_async_transaction); + if (list_empty(&buffer->target_node->async_todo)) + buffer->target_node->has_async_transaction = 0; + else + list_move_tail(buffer->target_node->async_todo.next, &thread->todo); + } + binder_transaction_buffer_release(proc, buffer, NULL); + binder_free_buf(proc, buffer); + break; + } + + case BC_TRANSACTION: + case BC_REPLY: { + struct binder_transaction_data tr; + + if (copy_from_user(&tr, ptr, sizeof(tr))) + return -EFAULT; + ptr += sizeof(tr); + binder_transaction(proc, thread, &tr, cmd == BC_REPLY); + break; + } + + case BC_REGISTER_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "binder: %d:%d BC_REGISTER_LOOPER\n", + proc->pid, thread->pid); + if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("binder: %d:%d ERROR:" + " BC_REGISTER_LOOPER called " + "after BC_ENTER_LOOPER\n", + proc->pid, thread->pid); + } else if (proc->requested_threads == 0) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("binder: %d:%d ERROR:" + " BC_REGISTER_LOOPER called " + "without request\n", + proc->pid, thread->pid); + } else { + proc->requested_threads--; + proc->requested_threads_started++; + } + thread->looper |= BINDER_LOOPER_STATE_REGISTERED; + break; + case BC_ENTER_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "binder: %d:%d BC_ENTER_LOOPER\n", + proc->pid, thread->pid); + if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("binder: %d:%d ERROR:" + " BC_ENTER_LOOPER called after " + "BC_REGISTER_LOOPER\n", + proc->pid, thread->pid); + } + thread->looper |= BINDER_LOOPER_STATE_ENTERED; + break; + case BC_EXIT_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "binder: %d:%d BC_EXIT_LOOPER\n", + proc->pid, thread->pid); + thread->looper |= BINDER_LOOPER_STATE_EXITED; + break; + + case BC_REQUEST_DEATH_NOTIFICATION: + case BC_CLEAR_DEATH_NOTIFICATION: { + uint32_t target; + void __user *cookie; + struct binder_ref *ref; + struct binder_ref_death *death; + + if (get_user(target, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (get_user(cookie, (void __user * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + ref = binder_get_ref(proc, target); + if (ref == NULL) { + binder_user_error("binder: %d:%d %s " + "invalid ref %d\n", + proc->pid, thread->pid, + cmd == BC_REQUEST_DEATH_NOTIFICATION ? + "BC_REQUEST_DEATH_NOTIFICATION" : + "BC_CLEAR_DEATH_NOTIFICATION", + target); + break; + } + + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, + "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", + proc->pid, thread->pid, + cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
+ "BC_REQUEST_DEATH_NOTIFICATION" : + "BC_CLEAR_DEATH_NOTIFICATION", + cookie, ref->debug_id, ref->desc, + ref->strong, ref->weak, ref->node->debug_id); + + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { + if (ref->death) { + binder_user_error("binder: %d:%" + "d BC_REQUEST_DEATH_NOTI" + "FICATION death notific" + "ation already set\n", + proc->pid, thread->pid); + break; + } + death = kzalloc(sizeof(*death), GFP_KERNEL); + if (death == NULL) { + thread->return_error = BR_ERROR; + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "binder: %d:%d " + "BC_REQUEST_DEATH_NOTIFICATION failed\n", + proc->pid, thread->pid); + break; + } + binder_stats_created(BINDER_STAT_DEATH); + INIT_LIST_HEAD(&death->work.entry); + death->cookie = cookie; + ref->death = death; + if (ref->node->proc == NULL) { + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { + list_add_tail(&ref->death->work.entry, &thread->todo); + } else { + list_add_tail(&ref->death->work.entry, &proc->todo); + wake_up_interruptible(&proc->wait); + } + } + } else { + if (ref->death == NULL) { + binder_user_error("binder: %d:%" + "d BC_CLEAR_DEATH_NOTIFI" + "CATION death notificat" + "ion not active\n", + proc->pid, thread->pid); + break; + } + death = ref->death; + if (death->cookie != cookie) { + binder_user_error("binder: %d:%" + "d BC_CLEAR_DEATH_NOTIFI" + "CATION death notificat" + "ion cookie mismatch " + "%p != %p\n", + proc->pid, thread->pid, + death->cookie, cookie); + break; + } + ref->death = NULL; + if (list_empty(&death->work.entry)) { + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { + list_add_tail(&death->work.entry, &thread->todo); + } else { + list_add_tail(&death->work.entry, &proc->todo); + wake_up_interruptible(&proc->wait); + } + } else { + BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); + death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; + } + } + } break; + case BC_DEAD_BINDER_DONE: { + struct binder_work *w; + void __user *cookie; + struct binder_ref_death *death = NULL; + if (get_user(cookie, (void __user * __user *)ptr)) + return -EFAULT; + + ptr += sizeof(void *); + list_for_each_entry(w, &proc->delivered_death, entry) { + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); + if (tmp_death->cookie == cookie) { + death = tmp_death; + break; + } + } + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n", + proc->pid, thread->pid, cookie, death); + if (death == NULL) { + binder_user_error("binder: %d:%d BC_DEAD" + "_BINDER_DONE %p not found\n", + proc->pid, thread->pid, cookie); + break; + } + + list_del_init(&death->work.entry); + if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { + list_add_tail(&death->work.entry, &thread->todo); + } else { + list_add_tail(&death->work.entry, &proc->todo); + wake_up_interruptible(&proc->wait); + } + } + } break; + + default: + printk(KERN_ERR "binder: %d:%d unknown command %d\n", + proc->pid, thread->pid, cmd); + return -EINVAL; + } + *consumed = ptr - buffer; + } + return 0; +} + +void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, + uint32_t cmd) +{ + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { + binder_stats.br[_IOC_NR(cmd)]++; + 
proc->stats.br[_IOC_NR(cmd)]++; + thread->stats.br[_IOC_NR(cmd)]++; + } +} + +static int binder_has_proc_work(struct binder_proc *proc, + struct binder_thread *thread) +{ + return !list_empty(&proc->todo) || + (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); +} + +static int binder_has_thread_work(struct binder_thread *thread) +{ + return !list_empty(&thread->todo) || thread->return_error != BR_OK || + (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); +} + +static int binder_thread_read(struct binder_proc *proc, + struct binder_thread *thread, + void __user *buffer, int size, + signed long *consumed, int non_block) +{ + void __user *ptr = buffer + *consumed; + void __user *end = buffer + size; + + int ret = 0; + int wait_for_proc_work; + + if (*consumed == 0) { + if (put_user(BR_NOOP, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + } + +retry: + wait_for_proc_work = thread->transaction_stack == NULL && + list_empty(&thread->todo); + + if (thread->return_error != BR_OK && ptr < end) { + if (thread->return_error2 != BR_OK) { + if (put_user(thread->return_error2, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (ptr == end) + goto done; + thread->return_error2 = BR_OK; + } + if (put_user(thread->return_error, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + thread->return_error = BR_OK; + goto done; + } + + + thread->looper |= BINDER_LOOPER_STATE_WAITING; + if (wait_for_proc_work) + proc->ready_threads++; + mutex_unlock(&binder_lock); + if (wait_for_proc_work) { + if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED))) { + binder_user_error("binder: %d:%d ERROR: Thread waiting " + "for process work before calling BC_REGISTER_" + "LOOPER or BC_ENTER_LOOPER (state %x)\n", + proc->pid, thread->pid, thread->looper); + wait_event_interruptible(binder_user_error_wait, + binder_stop_on_user_error < 2); + } + binder_set_nice(proc->default_priority); + if (non_block) { + if (!binder_has_proc_work(proc, thread)) + ret = -EAGAIN; + } else + ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)); + } else { + if (non_block) { + if (!binder_has_thread_work(thread)) + ret = -EAGAIN; + } else + ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); + } + mutex_lock(&binder_lock); + if (wait_for_proc_work) + proc->ready_threads--; + thread->looper &= ~BINDER_LOOPER_STATE_WAITING; + + if (ret) + return ret; + + while (1) { + uint32_t cmd; + struct binder_transaction_data tr; + struct binder_work *w; + struct binder_transaction *t = NULL; + + if (!list_empty(&thread->todo)) + w = list_first_entry(&thread->todo, struct binder_work, entry); + else if (!list_empty(&proc->todo) && wait_for_proc_work) + w = list_first_entry(&proc->todo, struct binder_work, entry); + else { + if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ + goto retry; + break; + } + + if (end - ptr < sizeof(tr) + 4) + break; + + switch (w->type) { + case BINDER_WORK_TRANSACTION: { + t = container_of(w, struct binder_transaction, work); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: { + cmd = BR_TRANSACTION_COMPLETE; + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, + "binder: %d:%d BR_TRANSACTION_COMPLETE\n", + proc->pid, thread->pid); + + list_del(&w->entry); + kfree(w); + 
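/* the work item itself is the entire TRANSACTION_COMPLETE record; freeing it is the only cleanup needed */ +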
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); + } break; + case BINDER_WORK_NODE: { + struct binder_node *node = container_of(w, struct binder_node, work); + uint32_t cmd = BR_NOOP; + const char *cmd_name; + int strong = node->internal_strong_refs || node->local_strong_refs; + int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; + if (weak && !node->has_weak_ref) { + cmd = BR_INCREFS; + cmd_name = "BR_INCREFS"; + node->has_weak_ref = 1; + node->pending_weak_ref = 1; + node->local_weak_refs++; + } else if (strong && !node->has_strong_ref) { + cmd = BR_ACQUIRE; + cmd_name = "BR_ACQUIRE"; + node->has_strong_ref = 1; + node->pending_strong_ref = 1; + node->local_strong_refs++; + } else if (!strong && node->has_strong_ref) { + cmd = BR_RELEASE; + cmd_name = "BR_RELEASE"; + node->has_strong_ref = 0; + } else if (!weak && node->has_weak_ref) { + cmd = BR_DECREFS; + cmd_name = "BR_DECREFS"; + node->has_weak_ref = 0; + } + if (cmd != BR_NOOP) { + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(node->ptr, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + if (put_user(node->cookie, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_USER_REFS, + "binder: %d:%d %s %d u%p c%p\n", + proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); + } else { + list_del_init(&w->entry); + if (!weak && !strong) { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d:%d node %d u%p c%p deleted\n", + proc->pid, thread->pid, node->debug_id, + node->ptr, node->cookie); + rb_erase(&node->rb_node, &proc->nodes); + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); + } else { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "binder: %d:%d node %d u%p c%p state unchanged\n", + proc->pid, thread->pid, node->debug_id, node->ptr, + node->cookie); + } + } + } break; + case BINDER_WORK_DEAD_BINDER: + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { + struct binder_ref_death *death; + uint32_t cmd; + + death = container_of(w, struct binder_ref_death, work); + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) + cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; + else + cmd = BR_DEAD_BINDER; + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(death->cookie, (void * __user *)ptr)) + return -EFAULT; + ptr += sizeof(void *); + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, + "binder: %d:%d %s %p\n", + proc->pid, thread->pid, + cmd == BR_DEAD_BINDER ? 
+ "BR_DEAD_BINDER" : + "BR_CLEAR_DEATH_NOTIFICATION_DONE", + death->cookie); + + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { + list_del(&w->entry); + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } else + list_move(&w->entry, &proc->delivered_death); + if (cmd == BR_DEAD_BINDER) + goto done; /* DEAD_BINDER notifications can cause transactions */ + } break; + } + + if (!t) + continue; + + BUG_ON(t->buffer == NULL); + if (t->buffer->target_node) { + struct binder_node *target_node = t->buffer->target_node; + tr.target.ptr = target_node->ptr; + tr.cookie = target_node->cookie; + t->saved_priority = task_nice(current); + if (t->priority < target_node->min_priority && + !(t->flags & TF_ONE_WAY)) + binder_set_nice(t->priority); + else if (!(t->flags & TF_ONE_WAY) || + t->saved_priority > target_node->min_priority) + binder_set_nice(target_node->min_priority); + cmd = BR_TRANSACTION; + } else { + tr.target.ptr = NULL; + tr.cookie = NULL; + cmd = BR_REPLY; + } + tr.code = t->code; + tr.flags = t->flags; + tr.sender_euid = t->sender_euid; + + if (t->from) { + struct task_struct *sender = t->from->proc->tsk; + tr.sender_pid = task_tgid_nr_ns(sender, + current->nsproxy->pid_ns); + } else { + tr.sender_pid = 0; + } + + tr.data_size = t->buffer->data_size; + tr.offsets_size = t->buffer->offsets_size; + tr.data.ptr.buffer = (void *)t->buffer->data + + proc->user_buffer_offset; + tr.data.ptr.offsets = tr.data.ptr.buffer + + ALIGN(t->buffer->data_size, + sizeof(void *)); + + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (copy_to_user(ptr, &tr, sizeof(tr))) + return -EFAULT; + ptr += sizeof(tr); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_TRANSACTION, + "binder: %d:%d %s %d %d:%d, cmd %d" + "size %zd-%zd ptr %p-%p\n", + proc->pid, thread->pid, + (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : + "BR_REPLY", + t->debug_id, t->from ? t->from->proc->pid : 0, + t->from ? 
t->from->pid : 0, cmd, + t->buffer->data_size, t->buffer->offsets_size, + tr.data.ptr.buffer, tr.data.ptr.offsets); + + list_del(&t->work.entry); + t->buffer->allow_user_free = 1; + if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + t->to_parent = thread->transaction_stack; + t->to_thread = thread; + thread->transaction_stack = t; + } else { + t->buffer->transaction = NULL; + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); + } + break; + } + +done: + + *consumed = ptr - buffer; + if (proc->requested_threads + proc->ready_threads == 0 && + proc->requested_threads_started < proc->max_threads && + (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ + /*spawn a new thread if we leave this out */) { + proc->requested_threads++; + binder_debug(BINDER_DEBUG_THREADS, + "binder: %d:%d BR_SPAWN_LOOPER\n", + proc->pid, thread->pid); + if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) + return -EFAULT; + } + return 0; +} + +static void binder_release_work(struct list_head *list) +{ + struct binder_work *w; + while (!list_empty(list)) { + w = list_first_entry(list, struct binder_work, entry); + list_del_init(&w->entry); + switch (w->type) { + case BINDER_WORK_TRANSACTION: { + struct binder_transaction *t; + + t = container_of(w, struct binder_transaction, work); + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) + binder_send_failed_reply(t, BR_DEAD_REPLY); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: { + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); + } break; + default: + break; + } + } + +} + +static struct binder_thread *binder_get_thread(struct binder_proc *proc) +{ + struct binder_thread *thread = NULL; + struct rb_node *parent = NULL; + struct rb_node **p = &proc->threads.rb_node; + + while (*p) { + parent = *p; + thread = rb_entry(parent, struct binder_thread, rb_node); + + if (current->pid < thread->pid) + p = &(*p)->rb_left; + else if (current->pid > thread->pid) + p = &(*p)->rb_right; + else + break; + } + if (*p == NULL) { + thread = kzalloc(sizeof(*thread), GFP_KERNEL); + if (thread == NULL) + return NULL; + binder_stats_created(BINDER_STAT_THREAD); + thread->proc = proc; + thread->pid = current->pid; + init_waitqueue_head(&thread->wait); + INIT_LIST_HEAD(&thread->todo); + rb_link_node(&thread->rb_node, parent, p); + rb_insert_color(&thread->rb_node, &proc->threads); + thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; + thread->return_error = BR_OK; + thread->return_error2 = BR_OK; + } + return thread; +} + +static int binder_free_thread(struct binder_proc *proc, + struct binder_thread *thread) +{ + struct binder_transaction *t; + struct binder_transaction *send_reply = NULL; + int active_transactions = 0; + + rb_erase(&thread->rb_node, &proc->threads); + t = thread->transaction_stack; + if (t && t->to_thread == thread) + send_reply = t; + while (t) { + active_transactions++; + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "binder: release %d:%d transaction %d " + "%s, still active\n", proc->pid, thread->pid, + t->debug_id, + (t->to_thread == thread) ? 
"in" : "out"); + + if (t->to_thread == thread) { + t->to_proc = NULL; + t->to_thread = NULL; + if (t->buffer) { + t->buffer->transaction = NULL; + t->buffer = NULL; + } + t = t->to_parent; + } else if (t->from == thread) { + t->from = NULL; + t = t->from_parent; + } else + BUG(); + } + if (send_reply) + binder_send_failed_reply(send_reply, BR_DEAD_REPLY); + binder_release_work(&thread->todo); + kfree(thread); + binder_stats_deleted(BINDER_STAT_THREAD); + return active_transactions; +} + +static unsigned int binder_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct binder_proc *proc = filp->private_data; + struct binder_thread *thread = NULL; + int wait_for_proc_work; + + mutex_lock(&binder_lock); + thread = binder_get_thread(proc); + + wait_for_proc_work = thread->transaction_stack == NULL && + list_empty(&thread->todo) && thread->return_error == BR_OK; + mutex_unlock(&binder_lock); + + if (wait_for_proc_work) { + if (binder_has_proc_work(proc, thread)) + return POLLIN; + poll_wait(filp, &proc->wait, wait); + if (binder_has_proc_work(proc, thread)) + return POLLIN; + } else { + if (binder_has_thread_work(thread)) + return POLLIN; + poll_wait(filp, &thread->wait, wait); + if (binder_has_thread_work(thread)) + return POLLIN; + } + return 0; +} + +static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + struct binder_proc *proc = filp->private_data; + struct binder_thread *thread; + unsigned int size = _IOC_SIZE(cmd); + void __user *ubuf = (void __user *)arg; + + /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ + + ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); + if (ret) + return ret; + + mutex_lock(&binder_lock); + thread = binder_get_thread(proc); + if (thread == NULL) { + ret = -ENOMEM; + goto err; + } + + switch (cmd) { + case BINDER_WRITE_READ: { + struct binder_write_read bwr; + if (size != sizeof(struct binder_write_read)) { + ret = -EINVAL; + goto err; + } + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { + ret = -EFAULT; + goto err; + } + binder_debug(BINDER_DEBUG_READ_WRITE, + "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n", + proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, + bwr.read_size, bwr.read_buffer); + + if (bwr.write_size > 0) { + ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); + if (ret < 0) { + bwr.read_consumed = 0; + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; + goto err; + } + } + if (bwr.read_size > 0) { + ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); + if (!list_empty(&proc->todo)) + wake_up_interruptible(&proc->wait); + if (ret < 0) { + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; + goto err; + } + } + binder_debug(BINDER_DEBUG_READ_WRITE, + "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", + proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, + bwr.read_consumed, bwr.read_size); + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { + ret = -EFAULT; + goto err; + } + break; + } + case BINDER_SET_MAX_THREADS: + if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { + ret = -EINVAL; + goto err; + } + break; + case BINDER_SET_CONTEXT_MGR: + if (binder_context_mgr_node != NULL) { + printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); + ret = -EBUSY; + goto err; + } + if (binder_context_mgr_uid 
!= -1) { + if (binder_context_mgr_uid != current->cred->euid) { + printk(KERN_ERR "binder: BINDER_SET_" + "CONTEXT_MGR bad uid %d != %d\n", + current->cred->euid, + binder_context_mgr_uid); + ret = -EPERM; + goto err; + } + } else + binder_context_mgr_uid = current->cred->euid; + binder_context_mgr_node = binder_new_node(proc, NULL, NULL); + if (binder_context_mgr_node == NULL) { + ret = -ENOMEM; + goto err; + } + binder_context_mgr_node->local_weak_refs++; + binder_context_mgr_node->local_strong_refs++; + binder_context_mgr_node->has_strong_ref = 1; + binder_context_mgr_node->has_weak_ref = 1; + break; + case BINDER_THREAD_EXIT: + binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n", + proc->pid, thread->pid); + binder_free_thread(proc, thread); + thread = NULL; + break; + case BINDER_VERSION: + if (size != sizeof(struct binder_version)) { + ret = -EINVAL; + goto err; + } + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { + ret = -EINVAL; + goto err; + } + break; + default: + ret = -EINVAL; + goto err; + } + ret = 0; +err: + if (thread) + thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; + mutex_unlock(&binder_lock); + wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); + if (ret && ret != -ERESTARTSYS) + printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); + return ret; +} + +static void binder_vma_open(struct vm_area_struct *vma) +{ + struct binder_proc *proc = vma->vm_private_data; + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); + dump_stack(); +} + +static void binder_vma_close(struct vm_area_struct *vma) +{ + struct binder_proc *proc = vma->vm_private_data; + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); + proc->vma = NULL; + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); +} + +static struct vm_operations_struct binder_vm_ops = { + .open = binder_vma_open, + .close = binder_vma_close, +}; + +static int binder_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + struct vm_struct *area; + struct binder_proc *proc = filp->private_data; + const char *failure_string; + struct binder_buffer *buffer; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) + vma->vm_end = vma->vm_start + SZ_4M; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); + + if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { + ret = -EPERM; + failure_string = "bad vm_flags"; + goto err_bad_arg; + } + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + + if (proc->buffer) { + ret = -EBUSY; + failure_string = "already mapped"; + goto err_already_mapped; + } + + area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + if (area == NULL) { + ret = -ENOMEM; + failure_string = "get_vm_area"; + goto err_get_vm_area_failed; + } + proc->buffer = area->addr; + proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; + +#ifdef CONFIG_CPU_CACHE_VIPT + if 
(cache_is_vipt_aliasing()) { + while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { + printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); + vma->vm_start += PAGE_SIZE; + } + } +#endif + proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); + if (proc->pages == NULL) { + ret = -ENOMEM; + failure_string = "alloc page array"; + goto err_alloc_pages_failed; + } + proc->buffer_size = vma->vm_end - vma->vm_start; + + vma->vm_ops = &binder_vm_ops; + vma->vm_private_data = proc; + + if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { + ret = -ENOMEM; + failure_string = "alloc small buf"; + goto err_alloc_small_buf_failed; + } + buffer = proc->buffer; + INIT_LIST_HEAD(&proc->buffers); + list_add(&buffer->entry, &proc->buffers); + buffer->free = 1; + binder_insert_free_buffer(proc, buffer); + proc->free_async_space = proc->buffer_size / 2; + barrier(); + proc->files = get_files_struct(current); + proc->vma = vma; + + /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", + proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ + return 0; + +err_alloc_small_buf_failed: + kfree(proc->pages); + proc->pages = NULL; +err_alloc_pages_failed: + vfree(proc->buffer); + proc->buffer = NULL; +err_get_vm_area_failed: +err_already_mapped: +err_bad_arg: + printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", + proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + return ret; +} + +static int binder_open(struct inode *nodp, struct file *filp) +{ + struct binder_proc *proc; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", + current->group_leader->pid, current->pid); + + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; + get_task_struct(current); + proc->tsk = current; + INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->wait); + proc->default_priority = task_nice(current); + mutex_lock(&binder_lock); + binder_stats_created(BINDER_STAT_PROC); + hlist_add_head(&proc->proc_node, &binder_procs); + proc->pid = current->group_leader->pid; + INIT_LIST_HEAD(&proc->delivered_death); + filp->private_data = proc; + mutex_unlock(&binder_lock); + + if (binder_debugfs_dir_entry_proc) { + char strbuf[11]; + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); + proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, + binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); + } + + return 0; +} + +static int binder_flush(struct file *filp, fl_owner_t id) +{ + struct binder_proc *proc = filp->private_data; + + binder_defer_work(proc, BINDER_DEFERRED_FLUSH); + + return 0; +} + +static void binder_deferred_flush(struct binder_proc *proc) +{ + struct rb_node *n; + int wake_count = 0; + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); + thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; + if (thread->looper & BINDER_LOOPER_STATE_WAITING) { + wake_up_interruptible(&thread->wait); + wake_count++; + } + } + wake_up_interruptible_all(&proc->wait); + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder_flush: %d woke %d threads\n", proc->pid, + wake_count); +} + +static int binder_release(struct inode *nodp, struct file *filp) +{ + struct binder_proc *proc = filp->private_data; + debugfs_remove(proc->debugfs_entry); + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); + + return 0; +} + +static void 
binder_deferred_release(struct binder_proc *proc) +{ + struct hlist_node *pos; + struct binder_transaction *t; + struct rb_node *n; + int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; + + BUG_ON(proc->vma); + BUG_ON(proc->files); + + hlist_del(&proc->proc_node); + if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder_release: %d context_mgr_node gone\n", + proc->pid); + binder_context_mgr_node = NULL; + } + + threads = 0; + active_transactions = 0; + while ((n = rb_first(&proc->threads))) { + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); + threads++; + active_transactions += binder_free_thread(proc, thread); + } + nodes = 0; + incoming_refs = 0; + while ((n = rb_first(&proc->nodes))) { + struct binder_node *node = rb_entry(n, struct binder_node, rb_node); + + nodes++; + rb_erase(&node->rb_node, &proc->nodes); + list_del_init(&node->work.entry); + if (hlist_empty(&node->refs)) { + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); + } else { + struct binder_ref *ref; + int death = 0; + + node->proc = NULL; + node->local_strong_refs = 0; + node->local_weak_refs = 0; + hlist_add_head(&node->dead_node, &binder_dead_nodes); + + hlist_for_each_entry(ref, pos, &node->refs, node_entry) { + incoming_refs++; + if (ref->death) { + death++; + if (list_empty(&ref->death->work.entry)) { + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + list_add_tail(&ref->death->work.entry, &ref->proc->todo); + wake_up_interruptible(&ref->proc->wait); + } else + BUG(); + } + } + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "binder: node %d now dead, " + "refs %d, death %d\n", node->debug_id, + incoming_refs, death); + } + } + outgoing_refs = 0; + while ((n = rb_first(&proc->refs_by_desc))) { + struct binder_ref *ref = rb_entry(n, struct binder_ref, + rb_node_desc); + outgoing_refs++; + binder_delete_ref(ref); + } + binder_release_work(&proc->todo); + buffers = 0; + + while ((n = rb_first(&proc->allocated_buffers))) { + struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, + rb_node); + t = buffer->transaction; + if (t) { + t->buffer = NULL; + buffer->transaction = NULL; + printk(KERN_ERR "binder: release proc %d, " + "transaction %d, not freed\n", + proc->pid, t->debug_id); + /*BUG();*/ + } + binder_free_buf(proc, buffer); + buffers++; + } + + binder_stats_deleted(BINDER_STAT_PROC); + + page_count = 0; + if (proc->pages) { + int i; + for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { + if (proc->pages[i]) { + void *page_addr = proc->buffer + i * PAGE_SIZE; + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder_release: %d: " + "page %d at %p not freed\n", + proc->pid, i, + page_addr); + unmap_kernel_range((unsigned long)page_addr, + PAGE_SIZE); + __free_page(proc->pages[i]); + page_count++; + } + } + kfree(proc->pages); + vfree(proc->buffer); + } + + put_task_struct(proc->tsk); + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder_release: %d threads %d, nodes %d (ref %d), " + "refs %d, active transactions %d, buffers %d, " + "pages %d\n", + proc->pid, threads, nodes, incoming_refs, outgoing_refs, + active_transactions, buffers, page_count); + + kfree(proc); +} + +static void binder_deferred_func(struct work_struct *work) +{ + struct binder_proc *proc; + struct files_struct *files; + + int defer; + do { + mutex_lock(&binder_lock); + mutex_lock(&binder_deferred_lock); + if (!hlist_empty(&binder_deferred_list)) { + proc = hlist_entry(binder_deferred_list.first, + struct 
binder_proc, deferred_work_node); + hlist_del_init(&proc->deferred_work_node); + defer = proc->deferred_work; + proc->deferred_work = 0; + } else { + proc = NULL; + defer = 0; + } + mutex_unlock(&binder_deferred_lock); + + files = NULL; + if (defer & BINDER_DEFERRED_PUT_FILES) { + files = proc->files; + if (files) + proc->files = NULL; + } + + if (defer & BINDER_DEFERRED_FLUSH) + binder_deferred_flush(proc); + + if (defer & BINDER_DEFERRED_RELEASE) + binder_deferred_release(proc); /* frees proc */ + + mutex_unlock(&binder_lock); + if (files) + put_files_struct(files); + } while (proc); +} +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) +{ + mutex_lock(&binder_deferred_lock); + proc->deferred_work |= defer; + if (hlist_unhashed(&proc->deferred_work_node)) { + hlist_add_head(&proc->deferred_work_node, + &binder_deferred_list); + queue_work(binder_deferred_workqueue, &binder_deferred_work); + } + mutex_unlock(&binder_deferred_lock); +} + +static void print_binder_transaction(struct seq_file *m, const char *prefix, + struct binder_transaction *t) +{ + seq_printf(m, + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + prefix, t->debug_id, t, + t->from ? t->from->proc->pid : 0, + t->from ? t->from->pid : 0, + t->to_proc ? t->to_proc->pid : 0, + t->to_thread ? t->to_thread->pid : 0, + t->code, t->flags, t->priority, t->need_reply); + if (t->buffer == NULL) { + seq_puts(m, " buffer free\n"); + return; + } + if (t->buffer->target_node) + seq_printf(m, " node %d", + t->buffer->target_node->debug_id); + seq_printf(m, " size %zd:%zd data %p\n", + t->buffer->data_size, t->buffer->offsets_size, + t->buffer->data); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, + struct binder_buffer *buffer) +{ + seq_printf(m, "%s %d: %p size %zd:%zd %s\n", + prefix, buffer->debug_id, buffer->data, + buffer->data_size, buffer->offsets_size, + buffer->transaction ? 
"active" : "delivered"); +} + +static void print_binder_work(struct seq_file *m, const char *prefix, + const char *transaction_prefix, + struct binder_work *w) +{ + struct binder_node *node; + struct binder_transaction *t; + + switch (w->type) { + case BINDER_WORK_TRANSACTION: + t = container_of(w, struct binder_transaction, work); + print_binder_transaction(m, transaction_prefix, t); + break; + case BINDER_WORK_TRANSACTION_COMPLETE: + seq_printf(m, "%stransaction complete\n", prefix); + break; + case BINDER_WORK_NODE: + node = container_of(w, struct binder_node, work); + seq_printf(m, "%snode work %d: u%p c%p\n", + prefix, node->debug_id, node->ptr, node->cookie); + break; + case BINDER_WORK_DEAD_BINDER: + seq_printf(m, "%shas dead binder\n", prefix); + break; + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + seq_printf(m, "%shas cleared dead binder\n", prefix); + break; + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: + seq_printf(m, "%shas cleared death notification\n", prefix); + break; + default: + seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); + break; + } +} + +static void print_binder_thread(struct seq_file *m, + struct binder_thread *thread, + int print_always) +{ + struct binder_transaction *t; + struct binder_work *w; + size_t start_pos = m->count; + size_t header_pos; + + seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); + header_pos = m->count; + t = thread->transaction_stack; + while (t) { + if (t->from == thread) { + print_binder_transaction(m, + " outgoing transaction", t); + t = t->from_parent; + } else if (t->to_thread == thread) { + print_binder_transaction(m, + " incoming transaction", t); + t = t->to_parent; + } else { + print_binder_transaction(m, " bad transaction", t); + t = NULL; + } + } + list_for_each_entry(w, &thread->todo, entry) { + print_binder_work(m, " ", " pending transaction", w); + } + if (!print_always && m->count == header_pos) + m->count = start_pos; +} + +static void print_binder_node(struct seq_file *m, struct binder_node *node) +{ + struct binder_ref *ref; + struct hlist_node *pos; + struct binder_work *w; + int count; + + count = 0; + hlist_for_each_entry(ref, pos, &node->refs, node_entry) + count++; + + seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", + node->debug_id, node->ptr, node->cookie, + node->has_strong_ref, node->has_weak_ref, + node->local_strong_refs, node->local_weak_refs, + node->internal_strong_refs, count); + if (count) { + seq_puts(m, " proc"); + hlist_for_each_entry(ref, pos, &node->refs, node_entry) + seq_printf(m, " %d", ref->proc->pid); + } + seq_puts(m, "\n"); + list_for_each_entry(w, &node->async_todo, entry) + print_binder_work(m, " ", + " pending async transaction", w); +} + +static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) +{ + seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", + ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", + ref->node->debug_id, ref->strong, ref->weak, ref->death); +} + +static void print_binder_proc(struct seq_file *m, + struct binder_proc *proc, int print_all) +{ + struct binder_work *w; + struct rb_node *n; + size_t start_pos = m->count; + size_t header_pos; + + seq_printf(m, "proc %d\n", proc->pid); + header_pos = m->count; + + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) + print_binder_thread(m, rb_entry(n, struct binder_thread, + rb_node), print_all); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + if (print_all || node->has_async_transaction) + print_binder_node(m, node); + } + if (print_all) { + for (n = rb_first(&proc->refs_by_desc); + n != NULL; + n = rb_next(n)) + print_binder_ref(m, rb_entry(n, struct binder_ref, + rb_node_desc)); + } + for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) + print_binder_buffer(m, " buffer", + rb_entry(n, struct binder_buffer, rb_node)); + list_for_each_entry(w, &proc->todo, entry) + print_binder_work(m, " ", " pending transaction", w); + list_for_each_entry(w, &proc->delivered_death, entry) { + seq_puts(m, " has delivered dead binder\n"); + break; + } + if (!print_all && m->count == header_pos) + m->count = start_pos; +} + +static const char *binder_return_strings[] = { + "BR_ERROR", + "BR_OK", + "BR_TRANSACTION", + "BR_REPLY", + "BR_ACQUIRE_RESULT", + "BR_DEAD_REPLY", + "BR_TRANSACTION_COMPLETE", + "BR_INCREFS", + "BR_ACQUIRE", + "BR_RELEASE", + "BR_DECREFS", + "BR_ATTEMPT_ACQUIRE", + "BR_NOOP", + "BR_SPAWN_LOOPER", + "BR_FINISHED", + "BR_DEAD_BINDER", + "BR_CLEAR_DEATH_NOTIFICATION_DONE", + "BR_FAILED_REPLY" +}; + +static const char *binder_command_strings[] = { + "BC_TRANSACTION", + "BC_REPLY", + "BC_ACQUIRE_RESULT", + "BC_FREE_BUFFER", + "BC_INCREFS", + "BC_ACQUIRE", + "BC_RELEASE", + "BC_DECREFS", + "BC_INCREFS_DONE", + "BC_ACQUIRE_DONE", + "BC_ATTEMPT_ACQUIRE", + "BC_REGISTER_LOOPER", + "BC_ENTER_LOOPER", + "BC_EXIT_LOOPER", + "BC_REQUEST_DEATH_NOTIFICATION", + "BC_CLEAR_DEATH_NOTIFICATION", + "BC_DEAD_BINDER_DONE" +}; + +static const char *binder_objstat_strings[] = { + "proc", + "thread", + "node", + "ref", + "death", + "transaction", + "transaction_complete" +}; + +static void print_binder_stats(struct seq_file *m, const char *prefix, + struct binder_stats *stats) +{ + int i; + + BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != + ARRAY_SIZE(binder_command_strings)); + for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { + if (stats->bc[i]) + seq_printf(m, "%s%s: %d\n", prefix, + binder_command_strings[i], stats->bc[i]); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->br) != + ARRAY_SIZE(binder_return_strings)); + for (i = 0; i < ARRAY_SIZE(stats->br); i++) { + if (stats->br[i]) + seq_printf(m, "%s%s: %d\n", prefix, + binder_return_strings[i], stats->br[i]); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(binder_objstat_strings)); + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { + if (stats->obj_created[i] || stats->obj_deleted[i]) + seq_printf(m, "%s%s: active %d total %d\n", prefix, + binder_objstat_strings[i], + stats->obj_created[i] - stats->obj_deleted[i], + stats->obj_created[i]); + } +} + +static void print_binder_proc_stats(struct seq_file *m, + struct binder_proc *proc) +{ + struct binder_work *w; + struct rb_node *n; + int count, strong, weak; + + seq_printf(m, "proc %d\n", proc->pid); + count = 0; + for (n 
= rb_first(&proc->threads); n != NULL; n = rb_next(n)) + count++; + seq_printf(m, " threads: %d\n", count); + seq_printf(m, " requested threads: %d+%d/%d\n" + " ready threads %d\n" + " free async space %zd\n", proc->requested_threads, + proc->requested_threads_started, proc->max_threads, + proc->ready_threads, proc->free_async_space); + count = 0; + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) + count++; + seq_printf(m, " nodes: %d\n", count); + count = 0; + strong = 0; + weak = 0; + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { + struct binder_ref *ref = rb_entry(n, struct binder_ref, + rb_node_desc); + count++; + strong += ref->strong; + weak += ref->weak; + } + seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); + + count = 0; + for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; + seq_printf(m, " buffers: %d\n", count); + + count = 0; + list_for_each_entry(w, &proc->todo, entry) { + switch (w->type) { + case BINDER_WORK_TRANSACTION: + count++; + break; + default: + break; + } + } + seq_printf(m, " pending transactions: %d\n", count); + + print_binder_stats(m, " ", &proc->stats); +} + + +static int binder_state_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + struct hlist_node *pos; + struct binder_node *node; + int do_lock = !binder_debug_no_lock; + + if (do_lock) + mutex_lock(&binder_lock); + + seq_puts(m, "binder state:\n"); + + if (!hlist_empty(&binder_dead_nodes)) + seq_puts(m, "dead nodes:\n"); + hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) + print_binder_node(m, node); + + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) + print_binder_proc(m, proc, 1); + if (do_lock) + mutex_unlock(&binder_lock); + return 0; +} + +static int binder_stats_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + struct hlist_node *pos; + int do_lock = !binder_debug_no_lock; + + if (do_lock) + mutex_lock(&binder_lock); + + seq_puts(m, "binder stats:\n"); + + print_binder_stats(m, "", &binder_stats); + + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) + print_binder_proc_stats(m, proc); + if (do_lock) + mutex_unlock(&binder_lock); + return 0; +} + +static int binder_transactions_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + struct hlist_node *pos; + int do_lock = !binder_debug_no_lock; + + if (do_lock) + mutex_lock(&binder_lock); + + seq_puts(m, "binder transactions:\n"); + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) + print_binder_proc(m, proc, 0); + if (do_lock) + mutex_unlock(&binder_lock); + return 0; +} + +static int binder_proc_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc = m->private; + int do_lock = !binder_debug_no_lock; + + if (do_lock) + mutex_lock(&binder_lock); + seq_puts(m, "binder proc state:\n"); + print_binder_proc(m, proc, 1); + if (do_lock) + mutex_unlock(&binder_lock); + return 0; +} + +static void print_binder_transaction_log_entry(struct seq_file *m, + struct binder_transaction_log_entry *e) +{ + seq_printf(m, + "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", + e->debug_id, (e->call_type == 2) ? "reply" : + ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, + e->from_thread, e->to_proc, e->to_thread, e->to_node, + e->target_handle, e->data_size, e->offsets_size); +} + +static int binder_transaction_log_show(struct seq_file *m, void *unused) +{ + struct binder_transaction_log *log = m->private; + int i; + + if (log->full) { + for (i = log->next; i < ARRAY_SIZE(log->entry); i++) + print_binder_transaction_log_entry(m, &log->entry[i]); + } + for (i = 0; i < log->next; i++) + print_binder_transaction_log_entry(m, &log->entry[i]); + return 0; +} + +static const struct file_operations binder_fops = { + .owner = THIS_MODULE, + .poll = binder_poll, + .unlocked_ioctl = binder_ioctl, + .mmap = binder_mmap, + .open = binder_open, + .flush = binder_flush, + .release = binder_release, +}; + +static struct miscdevice binder_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "binder", + .fops = &binder_fops +}; + +BINDER_DEBUG_ENTRY(state); +BINDER_DEBUG_ENTRY(stats); +BINDER_DEBUG_ENTRY(transactions); +BINDER_DEBUG_ENTRY(transaction_log); + +static int __init binder_init(void) +{ + int ret; + + binder_deferred_workqueue = create_singlethread_workqueue("binder"); + if (!binder_deferred_workqueue) + return -ENOMEM; + + binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); + if (binder_debugfs_dir_entry_root) + binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", + binder_debugfs_dir_entry_root); + ret = misc_register(&binder_miscdev); + if (binder_debugfs_dir_entry_root) { + debugfs_create_file("state", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_state_fops); + debugfs_create_file("stats", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_stats_fops); + debugfs_create_file("transactions", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_transactions_fops); + debugfs_create_file("transaction_log", + S_IRUGO, + binder_debugfs_dir_entry_root, + &binder_transaction_log, + &binder_transaction_log_fops); + debugfs_create_file("failed_transaction_log", + S_IRUGO, + binder_debugfs_dir_entry_root, + &binder_transaction_log_failed, + &binder_transaction_log_fops); + } + return ret; +} + +device_initcall(binder_init); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h new file mode 100644 index 000000000000..863ae1ad5d55 --- /dev/null +++ b/drivers/staging/android/binder.h @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
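+ *
+ * As a rough illustration only, a user-space sender might fill this in as
+ * follows before queueing it behind a BC_TRANSACTION command word in the
+ * binder_write_read write buffer ('handle', 'code' and 'args' stand for
+ * caller-supplied values, not names defined by this interface):
+ *
+ *     struct binder_transaction_data tr = {0};
+ *     tr.target.handle = handle;   /* descriptor of the remote object */
+ *     tr.code = code;              /* transaction command */
+ *     tr.flags = 0;
+ *     tr.data_size = sizeof(args);
+ *     tr.offsets_size = 0;         /* no flat_binder_object embedded */
+ *     tr.data.ptr.buffer = &args;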
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
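+ *
+ * Illustrative note: on the write side, the buffer handed to the
+ * BINDER_WRITE_READ ioctl carries a 32-bit command word (e.g.
+ * BC_TRANSACTION) immediately followed by this structure, mirroring
+ * how the read side returns BR_TRANSACTION or BR_REPLY followed by
+ * the same structure.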
+ */ + + BC_ACQUIRE_RESULT = _IOW('c', 2, int), + /* + * not currently supported + * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. + * Else you have acquired a primary reference on the object. + */ + + BC_FREE_BUFFER = _IOW('c', 3, int), + /* + * void *: ptr to transaction data received on a read + */ + + BC_INCREFS = _IOW('c', 4, int), + BC_ACQUIRE = _IOW('c', 5, int), + BC_RELEASE = _IOW('c', 6, int), + BC_DECREFS = _IOW('c', 7, int), + /* + * int: descriptor + */ + + BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), + BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie for binder + */ + + BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), + /* + * not currently supported + * int: priority + * int: descriptor + */ + + BC_REGISTER_LOOPER = _IO('c', 11), + /* + * No parameters. + * Register a spawned looper thread with the device. + */ + + BC_ENTER_LOOPER = _IO('c', 12), + BC_EXIT_LOOPER = _IO('c', 13), + /* + * No parameters. + * These two commands are sent as an application-level thread + * enters and exits the binder loop, respectively. They are + * used so the binder can have an accurate count of the number + * of looping threads it has available. + */ + + BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie + */ + + BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie + */ + + BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), + /* + * void *: cookie + */ +}; + +#endif /* _LINUX_BINDER_H */ + diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c new file mode 100644 index 000000000000..531bdbeede89 --- /dev/null +++ b/drivers/staging/android/logger.c @@ -0,0 +1,616 @@ +/* + * drivers/misc/logger.c + * + * A Logging Subsystem + * + * Copyright (C) 2007-2008 Google, Inc. + * + * Robert Love <rlove@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/time.h> +#include "logger.h" + +#include <asm/ioctls.h> + +/* + * struct logger_log - represents a specific log, such as 'main' or 'radio' + * + * This structure lives from module insertion until module removal, so it does + * not need additional reference counting. The structure is protected by the + * mutex 'mutex'. 
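+ *
+ * The buffer is used as a ring: its size must be a power of two (see the
+ * DEFINE_LOGGER_DEVICE() comment further down), and offsets into it wrap
+ * through the logger_offset() mask rather than an explicit modulus.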
+ */ +struct logger_log { + unsigned char *buffer;/* the ring buffer itself */ + struct miscdevice misc; /* misc device representing the log */ + wait_queue_head_t wq; /* wait queue for readers */ + struct list_head readers; /* this log's readers */ + struct mutex mutex; /* mutex protecting buffer */ + size_t w_off; /* current write head offset */ + size_t head; /* new readers start here */ + size_t size; /* size of the log */ +}; + +/* + * struct logger_reader - a logging device open for reading + * + * This object lives from open to release, so we don't need additional + * reference counting. The structure is protected by log->mutex. + */ +struct logger_reader { + struct logger_log *log; /* associated log */ + struct list_head list; /* entry in logger_log's list */ + size_t r_off; /* current read head offset */ +}; + +/* logger_offset - returns index 'n' into the log via (optimized) modulus */ +#define logger_offset(n) ((n) & (log->size - 1)) + +/* + * file_get_log - Given a file structure, return the associated log + * + * This isn't aesthetic. We have several goals: + * + * 1) Need to quickly obtain the associated log during an I/O operation + * 2) Readers need to maintain state (logger_reader) + * 3) Writers need to be very fast (open() should be a near no-op) + * + * In the reader case, we can trivially go file->logger_reader->logger_log. + * For a writer, we don't want to maintain a logger_reader, so we just go + * file->logger_log. Thus what file->private_data points at depends on whether + * or not the file was opened for reading. This function hides that dirtiness. + */ +static inline struct logger_log *file_get_log(struct file *file) +{ + if (file->f_mode & FMODE_READ) { + struct logger_reader *reader = file->private_data; + return reader->log; + } else + return file->private_data; +} + +/* + * get_entry_len - Grabs the length of the payload of the next entry starting + * from 'off'. + * + * Caller needs to hold log->mutex. + */ +static __u32 get_entry_len(struct logger_log *log, size_t off) +{ + __u16 val; + + switch (log->size - off) { + case 1: + memcpy(&val, log->buffer + off, 1); + memcpy(((char *) &val) + 1, log->buffer, 1); + break; + default: + memcpy(&val, log->buffer + off, 2); + } + + return sizeof(struct logger_entry) + val; +} + +/* + * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the + * user-space buffer 'buf'. Returns 'count' on success. + * + * Caller must hold log->mutex. + */ +static ssize_t do_read_log_to_user(struct logger_log *log, + struct logger_reader *reader, + char __user *buf, + size_t count) +{ + size_t len; + + /* + * We read from the log in two disjoint operations. First, we read from + * the current read head offset up to 'count' bytes or to the end of + * the log, whichever comes first. + */ + len = min(count, log->size - reader->r_off); + if (copy_to_user(buf, log->buffer + reader->r_off, len)) + return -EFAULT; + + /* + * Second, we read any remaining bytes, starting back at the head of + * the log. + */ + if (count != len) + if (copy_to_user(buf + len, log->buffer, count - len)) + return -EFAULT; + + reader->r_off = logger_offset(reader->r_off + count); + + return count; +} + +/* + * logger_read - our log's read() method + * + * Behavior: + * + * - O_NONBLOCK works + * - If there are no log entries to read, blocks until log is written to + * - Atomically reads exactly one log entry + * + * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read + * buffer is insufficient to hold next entry. 
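+ *
+ * A minimal reader sketch (illustration only; 'fd' is assumed to be a
+ * descriptor opened read-only on one of the log devices):
+ *
+ *     unsigned char buf[LOGGER_ENTRY_MAX_LEN];
+ *     ssize_t n = read(fd, buf, sizeof(buf));  /* exactly one entry */
+ *     struct logger_entry *e = (struct logger_entry *)buf;
+ *     /* e->len bytes of payload follow the header, starting at e->msg */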
+ */ +static ssize_t logger_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct logger_reader *reader = file->private_data; + struct logger_log *log = reader->log; + ssize_t ret; + DEFINE_WAIT(wait); + +start: + while (1) { + prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); + + mutex_lock(&log->mutex); + ret = (log->w_off == reader->r_off); + mutex_unlock(&log->mutex); + if (!ret) + break; + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + schedule(); + } + + finish_wait(&log->wq, &wait); + if (ret) + return ret; + + mutex_lock(&log->mutex); + + /* is there still something to read or did we race? */ + if (unlikely(log->w_off == reader->r_off)) { + mutex_unlock(&log->mutex); + goto start; + } + + /* get the size of the next entry */ + ret = get_entry_len(log, reader->r_off); + if (count < ret) { + ret = -EINVAL; + goto out; + } + + /* get exactly one entry from the log */ + ret = do_read_log_to_user(log, reader, buf, ret); + +out: + mutex_unlock(&log->mutex); + + return ret; +} + +/* + * get_next_entry - return the offset of the first valid entry at least 'len' + * bytes after 'off'. + * + * Caller must hold log->mutex. + */ +static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) +{ + size_t count = 0; + + do { + size_t nr = get_entry_len(log, off); + off = logger_offset(off + nr); + count += nr; + } while (count < len); + + return off; +} + +/* + * clock_interval - is a < c < b in mod-space? Put another way, does the line + * from a to b cross c? + */ +static inline int clock_interval(size_t a, size_t b, size_t c) +{ + if (b < a) { + if (a < c || b >= c) + return 1; + } else { + if (a < c && b >= c) + return 1; + } + + return 0; +} + +/* + * fix_up_readers - walk the list of all readers and "fix up" any who were + * lapped by the writer; also do the same for the default "start head". + * We do this by "pulling forward" the readers and start head to the first + * entry after the new write head. + * + * The caller needs to hold log->mutex. + */ +static void fix_up_readers(struct logger_log *log, size_t len) +{ + size_t old = log->w_off; + size_t new = logger_offset(old + len); + struct logger_reader *reader; + + if (clock_interval(old, new, log->head)) + log->head = get_next_entry(log, log->head, len); + + list_for_each_entry(reader, &log->readers, list) + if (clock_interval(old, new, reader->r_off)) + reader->r_off = get_next_entry(log, reader->r_off, len); +} + +/* + * do_write_log - writes 'len' bytes from 'buf' to 'log' + * + * The caller needs to hold log->mutex. + */ +static void do_write_log(struct logger_log *log, const void *buf, size_t count) +{ + size_t len; + + len = min(count, log->size - log->w_off); + memcpy(log->buffer + log->w_off, buf, len); + + if (count != len) + memcpy(log->buffer, buf + len, count - len); + + log->w_off = logger_offset(log->w_off + count); + +} + +/* + * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to + * the log 'log' + * + * The caller needs to hold log->mutex. + * + * Returns 'count' on success, negative error code on failure. 
+ */ +static ssize_t do_write_log_from_user(struct logger_log *log, + const void __user *buf, size_t count) +{ + size_t len; + + len = min(count, log->size - log->w_off); + if (len && copy_from_user(log->buffer + log->w_off, buf, len)) + return -EFAULT; + + if (count != len) + if (copy_from_user(log->buffer, buf + len, count - len)) + return -EFAULT; + + log->w_off = logger_offset(log->w_off + count); + + return count; +} + +/* + * logger_aio_write - our write method, implementing support for write(), + * writev(), and aio_write(). Writes are our fast path, and we try to optimize + * them above all else. + */ +ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t ppos) +{ + struct logger_log *log = file_get_log(iocb->ki_filp); + size_t orig = log->w_off; + struct logger_entry header; + struct timespec now; + ssize_t ret = 0; + + now = current_kernel_time(); + + header.pid = current->tgid; + header.tid = current->pid; + header.sec = now.tv_sec; + header.nsec = now.tv_nsec; + header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); + + /* null writes succeed, return zero */ + if (unlikely(!header.len)) + return 0; + + mutex_lock(&log->mutex); + + /* + * Fix up any readers, pulling them forward to the first readable + * entry after (what will be) the new write offset. We do this now + * because if we partially fail, we can end up with clobbered log + * entries that encroach on readable buffer. + */ + fix_up_readers(log, sizeof(struct logger_entry) + header.len); + + do_write_log(log, &header, sizeof(struct logger_entry)); + + while (nr_segs-- > 0) { + size_t len; + ssize_t nr; + + /* figure out how much of this vector we can keep */ + len = min_t(size_t, iov->iov_len, header.len - ret); + + /* write out this segment's payload */ + nr = do_write_log_from_user(log, iov->iov_base, len); + if (unlikely(nr < 0)) { + log->w_off = orig; + mutex_unlock(&log->mutex); + return nr; + } + + iov++; + ret += nr; + } + + mutex_unlock(&log->mutex); + + /* wake up any blocked readers */ + wake_up_interruptible(&log->wq); + + return ret; +} + +static struct logger_log *get_log_from_minor(int); + +/* + * logger_open - the log's open() file operation + * + * Note how near a no-op this is in the write-only case. Keep it that way! + */ +static int logger_open(struct inode *inode, struct file *file) +{ + struct logger_log *log; + int ret; + + ret = nonseekable_open(inode, file); + if (ret) + return ret; + + log = get_log_from_minor(MINOR(inode->i_rdev)); + if (!log) + return -ENODEV; + + if (file->f_mode & FMODE_READ) { + struct logger_reader *reader; + + reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL); + if (!reader) + return -ENOMEM; + + reader->log = log; + INIT_LIST_HEAD(&reader->list); + + mutex_lock(&log->mutex); + reader->r_off = log->head; + list_add_tail(&reader->list, &log->readers); + mutex_unlock(&log->mutex); + + file->private_data = reader; + } else + file->private_data = log; + + return 0; +} + +/* + * logger_release - the log's release file operation + * + * Note this is a total no-op in the write-only case. Keep it that way! + */ +static int logger_release(struct inode *ignored, struct file *file) +{ + if (file->f_mode & FMODE_READ) { + struct logger_reader *reader = file->private_data; + list_del(&reader->list); + kfree(reader); + } + + return 0; +} + +/* + * logger_poll - the log's poll file operation, for poll/select/epoll + * + * Note we always return POLLOUT, because you can always write() to the log. 
+ * Note also that, strictly speaking, a return value of POLLIN does not + * guarantee that the log is readable without blocking, as there is a small + * chance that the writer can lap the reader in the interim between poll() + * returning and the read() request. + */ +static unsigned int logger_poll(struct file *file, poll_table *wait) +{ + struct logger_reader *reader; + struct logger_log *log; + unsigned int ret = POLLOUT | POLLWRNORM; + + if (!(file->f_mode & FMODE_READ)) + return ret; + + reader = file->private_data; + log = reader->log; + + poll_wait(file, &log->wq, wait); + + mutex_lock(&log->mutex); + if (log->w_off != reader->r_off) + ret |= POLLIN | POLLRDNORM; + mutex_unlock(&log->mutex); + + return ret; +} + +static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct logger_log *log = file_get_log(file); + struct logger_reader *reader; + long ret = -ENOTTY; + + mutex_lock(&log->mutex); + + switch (cmd) { + case LOGGER_GET_LOG_BUF_SIZE: + ret = log->size; + break; + case LOGGER_GET_LOG_LEN: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + if (log->w_off >= reader->r_off) + ret = log->w_off - reader->r_off; + else + ret = (log->size - reader->r_off) + log->w_off; + break; + case LOGGER_GET_NEXT_ENTRY_LEN: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + if (log->w_off != reader->r_off) + ret = get_entry_len(log, reader->r_off); + else + ret = 0; + break; + case LOGGER_FLUSH_LOG: + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EBADF; + break; + } + list_for_each_entry(reader, &log->readers, list) + reader->r_off = log->w_off; + log->head = log->w_off; + ret = 0; + break; + } + + mutex_unlock(&log->mutex); + + return ret; +} + +static const struct file_operations logger_fops = { + .owner = THIS_MODULE, + .read = logger_read, + .aio_write = logger_aio_write, + .poll = logger_poll, + .unlocked_ioctl = logger_ioctl, + .compat_ioctl = logger_ioctl, + .open = logger_open, + .release = logger_release, +}; + +/* + * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which + * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than + * LONG_MAX minus LOGGER_ENTRY_MAX_LEN. 
+ */ +#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \ +static unsigned char _buf_ ## VAR[SIZE]; \ +static struct logger_log VAR = { \ + .buffer = _buf_ ## VAR, \ + .misc = { \ + .minor = MISC_DYNAMIC_MINOR, \ + .name = NAME, \ + .fops = &logger_fops, \ + .parent = NULL, \ + }, \ + .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \ + .readers = LIST_HEAD_INIT(VAR .readers), \ + .mutex = __MUTEX_INITIALIZER(VAR .mutex), \ + .w_off = 0, \ + .head = 0, \ + .size = SIZE, \ +}; + +DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) +DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) +DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) +DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 64*1024) + +static struct logger_log *get_log_from_minor(int minor) +{ + if (log_main.misc.minor == minor) + return &log_main; + if (log_events.misc.minor == minor) + return &log_events; + if (log_radio.misc.minor == minor) + return &log_radio; + if (log_system.misc.minor == minor) + return &log_system; + return NULL; +} + +static int __init init_log(struct logger_log *log) +{ + int ret; + + ret = misc_register(&log->misc); + if (unlikely(ret)) { + printk(KERN_ERR "logger: failed to register misc " + "device for log '%s'!\n", log->misc.name); + return ret; + } + + printk(KERN_INFO "logger: created %luK log '%s'\n", + (unsigned long) log->size >> 10, log->misc.name); + + return 0; +} + +static int __init logger_init(void) +{ + int ret; + + ret = init_log(&log_main); + if (unlikely(ret)) + goto out; + + ret = init_log(&log_events); + if (unlikely(ret)) + goto out; + + ret = init_log(&log_radio); + if (unlikely(ret)) + goto out; + + ret = init_log(&log_system); + if (unlikely(ret)) + goto out; + +out: + return ret; +} +device_initcall(logger_init); diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h new file mode 100644 index 000000000000..2cb06e9d8f98 --- /dev/null +++ b/drivers/staging/android/logger.h @@ -0,0 +1,49 @@ +/* include/linux/logger.h + * + * Copyright (C) 2007-2008 Google, Inc. + * Author: Robert Love <rlove@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_LOGGER_H +#define _LINUX_LOGGER_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +struct logger_entry { + __u16 len; /* length of the payload */ + __u16 __pad; /* no matter what, we get 2 bytes of padding */ + __s32 pid; /* generating process's pid */ + __s32 tid; /* generating process's tid */ + __s32 sec; /* seconds since Epoch */ + __s32 nsec; /* nanoseconds */ + char msg[0]; /* the entry's payload */ +}; + +#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ +#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ +#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */ +#define LOGGER_LOG_MAIN "log_main" /* everything else */ + +#define LOGGER_ENTRY_MAX_LEN (4*1024) +#define LOGGER_ENTRY_MAX_PAYLOAD \ + (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) + +#define __LOGGERIO 0xAE + +#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ +#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ +#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ +#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ + +#endif /* _LINUX_LOGGER_H */ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 000000000000..8b67ac06e4b0 --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,208 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set of memory thresholds + * where processes with a range of oom_adj values will get killed. Specify the + * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the + * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both + * files take a comma separated list of numbers in ascending order. + * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes + * with a oom_adj value of 8 or higher when the free memory drops below 4096 pages + * and kill processes with a oom_adj value of 0 or higher when the free memory + * drops below 1024 pages. + * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate + * and processes may not get killed until the normal oom killer is triggered. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/oom.h> +#include <linux/sched.h> +#include <linux/notifier.h> + +static uint32_t lowmem_debug_level = 2; +static int lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; +static int lowmem_adj_size = 4; +static size_t lowmem_minfree[6] = { + 3 * 512, /* 6MB */ + 2 * 1024, /* 8MB */ + 4 * 1024, /* 16MB */ + 16 * 1024, /* 64MB */ +}; +static int lowmem_minfree_size = 4; + +static struct task_struct *lowmem_deathpending; + +#define lowmem_print(level, x...) 
\ + do { \ + if (lowmem_debug_level >= (level)) \ + printk(x); \ + } while (0) + +static int +task_notify_func(struct notifier_block *self, unsigned long val, void *data); + +static struct notifier_block task_nb = { + .notifier_call = task_notify_func, +}; + +static int +task_notify_func(struct notifier_block *self, unsigned long val, void *data) +{ + struct task_struct *task = data; + if (task == lowmem_deathpending) { + lowmem_deathpending = NULL; + task_free_unregister(&task_nb); + } + return NOTIFY_OK; +} + +static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) +{ + struct task_struct *p; + struct task_struct *selected = NULL; + int rem = 0; + int tasksize; + int i; + int min_adj = OOM_ADJUST_MAX + 1; + int selected_tasksize = 0; + int selected_oom_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_page_state(NR_FREE_PAGES); + int other_file = global_page_state(NR_FILE_PAGES); + + /* + * If we already have a death outstanding, then + * bail out right away; indicating to vmscan + * that we have nothing further to offer on + * this pass. + * + */ + if (lowmem_deathpending) + return 0; + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for (i = 0; i < array_size; i++) { + if (other_free < lowmem_minfree[i] && + other_file < lowmem_minfree[i]) { + min_adj = lowmem_adj[i]; + break; + } + } + if (nr_to_scan > 0) + lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n", + nr_to_scan, gfp_mask, other_free, other_file, + min_adj); + rem = global_page_state(NR_ACTIVE_ANON) + + global_page_state(NR_ACTIVE_FILE) + + global_page_state(NR_INACTIVE_ANON) + + global_page_state(NR_INACTIVE_FILE); + if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { + lowmem_print(5, "lowmem_shrink %d, %x, return %d\n", + nr_to_scan, gfp_mask, rem); + return rem; + } + selected_oom_adj = min_adj; + + read_lock(&tasklist_lock); + for_each_process(p) { + struct mm_struct *mm; + struct signal_struct *sig; + int oom_adj; + + task_lock(p); + mm = p->mm; + sig = p->signal; + if (!mm || !sig) { + task_unlock(p); + continue; + } + oom_adj = sig->oom_adj; + if (oom_adj < min_adj) { + task_unlock(p); + continue; + } + tasksize = get_mm_rss(mm); + task_unlock(p); + if (tasksize <= 0) + continue; + if (selected) { + if (oom_adj < selected_oom_adj) + continue; + if (oom_adj == selected_oom_adj && + tasksize <= selected_tasksize) + continue; + } + selected = p; + selected_tasksize = tasksize; + selected_oom_adj = oom_adj; + lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", + p->pid, p->comm, oom_adj, tasksize); + } + if (selected) { + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); + lowmem_deathpending = selected; + task_free_register(&task_nb); + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } + lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", + nr_to_scan, gfp_mask, rem); + read_unlock(&tasklist_lock); + return rem; +} + +static struct shrinker lowmem_shrinker = { + .shrink = lowmem_shrink, + .seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} + +static void __exit lowmem_exit(void) +{ + unregister_shrinker(&lowmem_shrinker); +} + +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, + S_IRUGO | S_IWUSR); 
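
As a small aside to the tuning interface documented in the driver's header comment: the module_param_array_named() declarations here (adj above, minfree just below) are what expose the two arrays as /sys/module/lowmemorykiller/parameters/adj and .../minfree. A minimal, hypothetical userspace helper for the "0,8" / "1024,4096" example from that comment might look like the sketch below; the paths come from the comment and the error handling is deliberately thin.

    /* Hypothetical helper (not part of this patch): writes the example
     * thresholds from the lowmemorykiller documentation block to the
     * module parameter files.
     */
    #include <stdio.h>

    static int write_param(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* kill adj >= 0 below 1024 free pages, adj >= 8 below 4096 pages */
            write_param("/sys/module/lowmemorykiller/parameters/adj", "0,8");
            write_param("/sys/module/lowmemorykiller/parameters/minfree",
                        "1024,4096");
            return 0;
    }
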
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, + S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); + +module_init(lowmem_init); +module_exit(lowmem_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c new file mode 100644 index 000000000000..53f736b0ec83 --- /dev/null +++ b/drivers/staging/android/ram_console.c @@ -0,0 +1,418 @@ +/* drivers/android/ram_console.c + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/console.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/proc_fs.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/io.h> + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION +#include <linux/rslib.h> +#endif + +struct ram_console_buffer { + uint32_t sig; + uint32_t start; + uint32_t size; + uint8_t data[0]; +}; + +#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */ + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT +static char __initdata + ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE]; +#endif +static char *ram_console_old_log; +static size_t ram_console_old_log_size; + +static struct ram_console_buffer *ram_console_buffer; +static size_t ram_console_buffer_size; +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION +static char *ram_console_par_buffer; +static struct rs_control *ram_console_rs_decoder; +static int ram_console_corrected_bytes; +static int ram_console_bad_blocks; +#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE +#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE +#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE +#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL +#endif + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION +static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc) +{ + int i; + uint16_t par[ECC_SIZE]; + /* Initialize the parity buffer */ + memset(par, 0, sizeof(par)); + encode_rs8(ram_console_rs_decoder, data, len, par, 0); + for (i = 0; i < ECC_SIZE; i++) + ecc[i] = par[i]; +} + +static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc) +{ + int i; + uint16_t par[ECC_SIZE]; + for (i = 0; i < ECC_SIZE; i++) + par[i] = ecc[i]; + return decode_rs8(ram_console_rs_decoder, data, par, len, + NULL, 0, NULL, 0, NULL); +} +#endif + +static void ram_console_update(const char *s, unsigned int count) +{ + struct ram_console_buffer *buffer = ram_console_buffer; +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + uint8_t *buffer_end = buffer->data + ram_console_buffer_size; + uint8_t *block; + uint8_t *par; + int size = ECC_BLOCK_SIZE; +#endif + memcpy(buffer->data + buffer->start, s, count); +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1)); + par = ram_console_par_buffer + + (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE; + do { + if (block 
+ ECC_BLOCK_SIZE > buffer_end) + size = buffer_end - block; + ram_console_encode_rs8(block, size, par); + block += ECC_BLOCK_SIZE; + par += ECC_SIZE; + } while (block < buffer->data + buffer->start + count); +#endif +} + +static void ram_console_update_header(void) +{ +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + struct ram_console_buffer *buffer = ram_console_buffer; + uint8_t *par; + par = ram_console_par_buffer + + DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; + ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par); +#endif +} + +static void +ram_console_write(struct console *console, const char *s, unsigned int count) +{ + int rem; + struct ram_console_buffer *buffer = ram_console_buffer; + + if (count > ram_console_buffer_size) { + s += count - ram_console_buffer_size; + count = ram_console_buffer_size; + } + rem = ram_console_buffer_size - buffer->start; + if (rem < count) { + ram_console_update(s, rem); + s += rem; + count -= rem; + buffer->start = 0; + buffer->size = ram_console_buffer_size; + } + ram_console_update(s, count); + + buffer->start += count; + if (buffer->size < ram_console_buffer_size) + buffer->size += count; + ram_console_update_header(); +} + +static struct console ram_console = { + .name = "ram", + .write = ram_console_write, + .flags = CON_PRINTBUFFER | CON_ENABLED, + .index = -1, +}; + +void ram_console_enable_console(int enabled) +{ + if (enabled) + ram_console.flags |= CON_ENABLED; + else + ram_console.flags &= ~CON_ENABLED; +} + +static void __init +ram_console_save_old(struct ram_console_buffer *buffer, char *dest) +{ + size_t old_log_size = buffer->size; +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + uint8_t *block; + uint8_t *par; + char strbuf[80]; + int strbuf_len; + + block = buffer->data; + par = ram_console_par_buffer; + while (block < buffer->data + buffer->size) { + int numerr; + int size = ECC_BLOCK_SIZE; + if (block + size > buffer->data + ram_console_buffer_size) + size = buffer->data + ram_console_buffer_size - block; + numerr = ram_console_decode_rs8(block, size, par); + if (numerr > 0) { +#if 0 + printk(KERN_INFO "ram_console: error in block %p, %d\n", + block, numerr); +#endif + ram_console_corrected_bytes += numerr; + } else if (numerr < 0) { +#if 0 + printk(KERN_INFO "ram_console: uncorrectable error in " + "block %p\n", block); +#endif + ram_console_bad_blocks++; + } + block += ECC_BLOCK_SIZE; + par += ECC_SIZE; + } + if (ram_console_corrected_bytes || ram_console_bad_blocks) + strbuf_len = snprintf(strbuf, sizeof(strbuf), + "\n%d Corrected bytes, %d unrecoverable blocks\n", + ram_console_corrected_bytes, ram_console_bad_blocks); + else + strbuf_len = snprintf(strbuf, sizeof(strbuf), + "\nNo errors detected\n"); + if (strbuf_len >= sizeof(strbuf)) + strbuf_len = sizeof(strbuf) - 1; + old_log_size += strbuf_len; +#endif + + if (dest == NULL) { + dest = kmalloc(old_log_size, GFP_KERNEL); + if (dest == NULL) { + printk(KERN_ERR + "ram_console: failed to allocate buffer\n"); + return; + } + } + + ram_console_old_log = dest; + ram_console_old_log_size = old_log_size; + memcpy(ram_console_old_log, + &buffer->data[buffer->start], buffer->size - buffer->start); + memcpy(ram_console_old_log + buffer->size - buffer->start, + &buffer->data[0], buffer->start); +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + memcpy(ram_console_old_log + old_log_size - strbuf_len, + strbuf, strbuf_len); +#endif +} + +static int __init ram_console_init(struct ram_console_buffer *buffer, + size_t buffer_size, char 
*old_buf) +{ +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + int numerr; + uint8_t *par; +#endif + ram_console_buffer = buffer; + ram_console_buffer_size = + buffer_size - sizeof(struct ram_console_buffer); + + if (ram_console_buffer_size > buffer_size) { + pr_err("ram_console: buffer %p, invalid size %zu, " + "datasize %zu\n", buffer, buffer_size, + ram_console_buffer_size); + return 0; + } + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size, + ECC_BLOCK_SIZE) + 1) * ECC_SIZE; + + if (ram_console_buffer_size > buffer_size) { + pr_err("ram_console: buffer %p, invalid size %zu, " + "non-ecc datasize %zu\n", + buffer, buffer_size, ram_console_buffer_size); + return 0; + } + + ram_console_par_buffer = buffer->data + ram_console_buffer_size; + + + /* first consecutive root is 0 + * primitive element to generate roots = 1 + */ + ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE); + if (ram_console_rs_decoder == NULL) { + printk(KERN_INFO "ram_console: init_rs failed\n"); + return 0; + } + + ram_console_corrected_bytes = 0; + ram_console_bad_blocks = 0; + + par = ram_console_par_buffer + + DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; + + numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par); + if (numerr > 0) { + printk(KERN_INFO "ram_console: error in header, %d\n", numerr); + ram_console_corrected_bytes += numerr; + } else if (numerr < 0) { + printk(KERN_INFO + "ram_console: uncorrectable error in header\n"); + ram_console_bad_blocks++; + } +#endif + + if (buffer->sig == RAM_CONSOLE_SIG) { + if (buffer->size > ram_console_buffer_size + || buffer->start > buffer->size) + printk(KERN_INFO "ram_console: found existing invalid " + "buffer, size %d, start %d\n", + buffer->size, buffer->start); + else { + printk(KERN_INFO "ram_console: found existing buffer, " + "size %d, start %d\n", + buffer->size, buffer->start); + ram_console_save_old(buffer, old_buf); + } + } else { + printk(KERN_INFO "ram_console: no valid data in buffer " + "(sig = 0x%08x)\n", buffer->sig); + } + + buffer->sig = RAM_CONSOLE_SIG; + buffer->start = 0; + buffer->size = 0; + + register_console(&ram_console); +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE + console_verbose(); +#endif + return 0; +} + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT +static int __init ram_console_early_init(void) +{ + return ram_console_init((struct ram_console_buffer *) + CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR, + CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE, + ram_console_old_log_init_buffer); +} +#else +static int ram_console_driver_probe(struct platform_device *pdev) +{ + struct resource *res = pdev->resource; + size_t start; + size_t buffer_size; + void *buffer; + + if (res == NULL || pdev->num_resources != 1 || + !(res->flags & IORESOURCE_MEM)) { + printk(KERN_ERR "ram_console: invalid resource, %p %d flags " + "%lx\n", res, pdev->num_resources, res ? 
res->flags : 0); + return -ENXIO; + } + buffer_size = res->end - res->start + 1; + start = res->start; + printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n", + start, buffer_size); + buffer = ioremap(res->start, buffer_size); + if (buffer == NULL) { + printk(KERN_ERR "ram_console: failed to map memory\n"); + return -ENOMEM; + } + + return ram_console_init(buffer, buffer_size, NULL/* allocate */); +} + +static struct platform_driver ram_console_driver = { + .probe = ram_console_driver_probe, + .driver = { + .name = "ram_console", + }, +}; + +static int __init ram_console_module_init(void) +{ + int err; + err = platform_driver_register(&ram_console_driver); + return err; +} +#endif + +static ssize_t ram_console_read_old(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + loff_t pos = *offset; + ssize_t count; + + if (pos >= ram_console_old_log_size) + return 0; + + count = min(len, (size_t)(ram_console_old_log_size - pos)); + if (copy_to_user(buf, ram_console_old_log + pos, count)) + return -EFAULT; + + *offset += count; + return count; +} + +static const struct file_operations ram_console_file_ops = { + .owner = THIS_MODULE, + .read = ram_console_read_old, +}; + +static int __init ram_console_late_init(void) +{ + struct proc_dir_entry *entry; + + if (ram_console_old_log == NULL) + return 0; +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT + ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL); + if (ram_console_old_log == NULL) { + printk(KERN_ERR + "ram_console: failed to allocate buffer for old log\n"); + ram_console_old_log_size = 0; + return 0; + } + memcpy(ram_console_old_log, + ram_console_old_log_init_buffer, ram_console_old_log_size); +#endif + entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL); + if (!entry) { + printk(KERN_ERR "ram_console: failed to create proc entry\n"); + kfree(ram_console_old_log); + ram_console_old_log = NULL; + return 0; + } + + entry->proc_fops = &ram_console_file_ops; + entry->size = ram_console_old_log_size; + return 0; +} + +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT +console_initcall(ram_console_early_init); +#else +postcore_initcall(ram_console_module_init); +#endif +late_initcall(ram_console_late_init); + diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c new file mode 100644 index 000000000000..a64481c3e86d --- /dev/null +++ b/drivers/staging/android/timed_gpio.c @@ -0,0 +1,176 @@ +/* drivers/misc/timed_gpio.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+	struct timed_output_dev dev;
+	struct hrtimer timer;
+	spinlock_t lock;
+	unsigned gpio;
+	int max_timeout;
+	u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+	struct timed_gpio_data *data =
+		container_of(timer, struct timed_gpio_data, timer);
+
+	gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+	return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+	struct timed_gpio_data *data =
+		container_of(dev, struct timed_gpio_data, dev);
+
+	if (hrtimer_active(&data->timer)) {
+		ktime_t r = hrtimer_get_remaining(&data->timer);
+		struct timeval t = ktime_to_timeval(r);
+		return t.tv_sec * 1000 + t.tv_usec / 1000;
+	} else
+		return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+	struct timed_gpio_data *data =
+		container_of(dev, struct timed_gpio_data, dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->lock, flags);
+
+	/* cancel previous timer and set GPIO according to value */
+	hrtimer_cancel(&data->timer);
+	gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+	if (value > 0) {
+		if (value > data->max_timeout)
+			value = data->max_timeout;
+
+		hrtimer_start(&data->timer,
+			ktime_set(value / 1000, (value % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+	}
+
+	spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio *cur_gpio;
+	struct timed_gpio_data *gpio_data, *gpio_dat;
+	int i, j, ret = 0;
+
+	if (!pdata)
+		return -EBUSY;
+
+	gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+			GFP_KERNEL);
+	if (!gpio_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		cur_gpio = &pdata->gpios[i];
+		gpio_dat = &gpio_data[i];
+
+		hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+				HRTIMER_MODE_REL);
+		gpio_dat->timer.function = gpio_timer_func;
+		spin_lock_init(&gpio_dat->lock);
+
+		gpio_dat->dev.name = cur_gpio->name;
+		gpio_dat->dev.get_time = gpio_get_time;
+		gpio_dat->dev.enable = gpio_enable;
+		ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+		if (ret >= 0) {
+			ret = timed_output_dev_register(&gpio_dat->dev);
+			if (ret < 0)
+				gpio_free(cur_gpio->gpio);
+		}
+		if (ret < 0) {
+			for (j = 0; j < i; j++) {
+				timed_output_dev_unregister(&gpio_data[j].dev);
+				gpio_free(gpio_data[j].gpio);
+			}
+			kfree(gpio_data);
+			return ret;
+		}
+
+		gpio_dat->gpio = cur_gpio->gpio;
+		gpio_dat->max_timeout = cur_gpio->max_timeout;
+		gpio_dat->active_low = cur_gpio->active_low;
+		gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+	}
+
+	platform_set_drvdata(pdev, gpio_data);
+
+	return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		timed_output_dev_unregister(&gpio_data[i].dev);
+		gpio_free(gpio_data[i].gpio);
+	}
+
+	kfree(gpio_data);
+
+	return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+	.probe		= timed_gpio_probe,
+	.remove		= timed_gpio_remove,
+	.driver		= {
+		.name		= TIMED_GPIO_NAME,
+		.owner		= THIS_MODULE,
+ }, +}; + +static int __init timed_gpio_init(void) +{ + return platform_driver_register(&timed_gpio_driver); +} + +static void __exit timed_gpio_exit(void) +{ + platform_driver_unregister(&timed_gpio_driver); +} + +module_init(timed_gpio_init); +module_exit(timed_gpio_exit); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("timed gpio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h new file mode 100644 index 000000000000..a0e15f8be3f7 --- /dev/null +++ b/drivers/staging/android/timed_gpio.h @@ -0,0 +1,33 @@ +/* include/linux/timed_gpio.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#ifndef _LINUX_TIMED_GPIO_H +#define _LINUX_TIMED_GPIO_H + +#define TIMED_GPIO_NAME "timed-gpio" + +struct timed_gpio { + const char *name; + unsigned gpio; + int max_timeout; + u8 active_low; +}; + +struct timed_gpio_platform_data { + int num_gpios; + struct timed_gpio *gpios; +}; + +#endif diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c new file mode 100644 index 000000000000..f373422308e0 --- /dev/null +++ b/drivers/staging/android/timed_output.c @@ -0,0 +1,123 @@ +/* drivers/misc/timed_output.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> + +#include "timed_output.h" + +static struct class *timed_output_class; +static atomic_t device_count; + +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct timed_output_dev *tdev = dev_get_drvdata(dev); + int remaining = tdev->get_time(tdev); + + return sprintf(buf, "%d\n", remaining); +} + +static ssize_t enable_store( + struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct timed_output_dev *tdev = dev_get_drvdata(dev); + int value; + + if (sscanf(buf, "%d", &value) != 1) + return -EINVAL; + + tdev->enable(tdev, value); + + return size; +} + +static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); + +static int create_timed_output_class(void) +{ + if (!timed_output_class) { + timed_output_class = class_create(THIS_MODULE, "timed_output"); + if (IS_ERR(timed_output_class)) + return PTR_ERR(timed_output_class); + atomic_set(&device_count, 0); + } + + return 0; +} + +int timed_output_dev_register(struct timed_output_dev *tdev) +{ + int ret; + + if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time) + return -EINVAL; + + ret = create_timed_output_class(); + if (ret < 0) + return ret; + + tdev->index = atomic_inc_return(&device_count); + tdev->dev = device_create(timed_output_class, NULL, + MKDEV(0, tdev->index), NULL, tdev->name); + if (IS_ERR(tdev->dev)) + return PTR_ERR(tdev->dev); + + ret = device_create_file(tdev->dev, &dev_attr_enable); + if (ret < 0) + goto err_create_file; + + dev_set_drvdata(tdev->dev, tdev); + tdev->state = 0; + return 0; + +err_create_file: + device_destroy(timed_output_class, MKDEV(0, tdev->index)); + printk(KERN_ERR "timed_output: Failed to register driver %s\n", + tdev->name); + + return ret; +} +EXPORT_SYMBOL_GPL(timed_output_dev_register); + +void timed_output_dev_unregister(struct timed_output_dev *tdev) +{ + device_remove_file(tdev->dev, &dev_attr_enable); + device_destroy(timed_output_class, MKDEV(0, tdev->index)); + dev_set_drvdata(tdev->dev, NULL); +} +EXPORT_SYMBOL_GPL(timed_output_dev_unregister); + +static int __init timed_output_init(void) +{ + return create_timed_output_class(); +} + +static void __exit timed_output_exit(void) +{ + class_destroy(timed_output_class); +} + +module_init(timed_output_init); +module_exit(timed_output_exit); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("timed output class driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h new file mode 100644 index 000000000000..ec907ab2ff54 --- /dev/null +++ b/drivers/staging/android/timed_output.h @@ -0,0 +1,37 @@ +/* include/linux/timed_output.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#ifndef _LINUX_TIMED_OUTPUT_H +#define _LINUX_TIMED_OUTPUT_H + +struct timed_output_dev { + const char *name; + + /* enable the output and set the timer */ + void (*enable)(struct timed_output_dev *sdev, int timeout); + + /* returns the current number of milliseconds remaining on the timer */ + int (*get_time)(struct timed_output_dev *sdev); + + /* private data */ + struct device *dev; + int index; + int state; +}; + +extern int timed_output_dev_register(struct timed_output_dev *dev); +extern void timed_output_dev_unregister(struct timed_output_dev *dev); + +#endif diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig new file mode 100644 index 000000000000..52385914b9ae --- /dev/null +++ b/drivers/switch/Kconfig @@ -0,0 +1,15 @@ +menuconfig SWITCH + tristate "Switch class support" + help + Say Y here to enable switch class support. This allows + monitoring switches by userspace via sysfs and uevent. + +if SWITCH + +config SWITCH_GPIO + tristate "GPIO Swith support" + depends on GENERIC_GPIO + help + Say Y here to enable GPIO based switch support. + +endif # SWITCH diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile new file mode 100644 index 000000000000..f7606ed4a719 --- /dev/null +++ b/drivers/switch/Makefile @@ -0,0 +1,4 @@ +# Switch Class Driver +obj-$(CONFIG_SWITCH) += switch_class.o +obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o + diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c new file mode 100644 index 000000000000..e05fc2591147 --- /dev/null +++ b/drivers/switch/switch_class.c @@ -0,0 +1,174 @@ +/* + * drivers/switch/switch_class.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/switch.h> + +struct class *switch_class; +static atomic_t device_count; + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct switch_dev *sdev = (struct switch_dev *) + dev_get_drvdata(dev); + + if (sdev->print_state) { + int ret = sdev->print_state(sdev, buf); + if (ret >= 0) + return ret; + } + return sprintf(buf, "%d\n", sdev->state); +} + +static ssize_t name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct switch_dev *sdev = (struct switch_dev *) + dev_get_drvdata(dev); + + if (sdev->print_name) { + int ret = sdev->print_name(sdev, buf); + if (ret >= 0) + return ret; + } + return sprintf(buf, "%s\n", sdev->name); +} + +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL); +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL); + +void switch_set_state(struct switch_dev *sdev, int state) +{ + char name_buf[120]; + char state_buf[120]; + char *prop_buf; + char *envp[3]; + int env_offset = 0; + int length; + + if (sdev->state != state) { + sdev->state = state; + + prop_buf = (char *)get_zeroed_page(GFP_KERNEL); + if (prop_buf) { + length = name_show(sdev->dev, NULL, prop_buf); + if (length > 0) { + if (prop_buf[length - 1] == '\n') + prop_buf[length - 1] = 0; + snprintf(name_buf, sizeof(name_buf), + "SWITCH_NAME=%s", prop_buf); + envp[env_offset++] = name_buf; + } + length = state_show(sdev->dev, NULL, prop_buf); + if (length > 0) { + if (prop_buf[length - 1] == '\n') + prop_buf[length - 1] = 0; + snprintf(state_buf, sizeof(state_buf), + "SWITCH_STATE=%s", prop_buf); + envp[env_offset++] = state_buf; + } + envp[env_offset] = NULL; + kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp); + free_page((unsigned long)prop_buf); + } else { + printk(KERN_ERR "out of memory in switch_set_state\n"); + kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE); + } + } +} +EXPORT_SYMBOL_GPL(switch_set_state); + +static int create_switch_class(void) +{ + if (!switch_class) { + switch_class = class_create(THIS_MODULE, "switch"); + if (IS_ERR(switch_class)) + return PTR_ERR(switch_class); + atomic_set(&device_count, 0); + } + + return 0; +} + +int switch_dev_register(struct switch_dev *sdev) +{ + int ret; + + if (!switch_class) { + ret = create_switch_class(); + if (ret < 0) + return ret; + } + + sdev->index = atomic_inc_return(&device_count); + sdev->dev = device_create(switch_class, NULL, + MKDEV(0, sdev->index), NULL, sdev->name); + if (IS_ERR(sdev->dev)) + return PTR_ERR(sdev->dev); + + ret = device_create_file(sdev->dev, &dev_attr_state); + if (ret < 0) + goto err_create_file_1; + ret = device_create_file(sdev->dev, &dev_attr_name); + if (ret < 0) + goto err_create_file_2; + + dev_set_drvdata(sdev->dev, sdev); + sdev->state = 0; + return 0; + +err_create_file_2: + device_remove_file(sdev->dev, &dev_attr_state); +err_create_file_1: + device_destroy(switch_class, MKDEV(0, sdev->index)); + printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name); + + return ret; +} +EXPORT_SYMBOL_GPL(switch_dev_register); + +void switch_dev_unregister(struct switch_dev *sdev) +{ + device_remove_file(sdev->dev, &dev_attr_name); + device_remove_file(sdev->dev, &dev_attr_state); + device_destroy(switch_class, MKDEV(0, sdev->index)); + dev_set_drvdata(sdev->dev, NULL); +} +EXPORT_SYMBOL_GPL(switch_dev_unregister); + +static int __init 
switch_class_init(void) +{ + return create_switch_class(); +} + +static void __exit switch_class_exit(void) +{ + class_destroy(switch_class); +} + +module_init(switch_class_init); +module_exit(switch_class_exit); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("Switch class driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c new file mode 100644 index 000000000000..7e9faa211e48 --- /dev/null +++ b/drivers/switch/switch_gpio.c @@ -0,0 +1,172 @@ +/* + * drivers/switch/switch_gpio.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/switch.h> +#include <linux/workqueue.h> +#include <linux/gpio.h> + +struct gpio_switch_data { + struct switch_dev sdev; + unsigned gpio; + const char *name_on; + const char *name_off; + const char *state_on; + const char *state_off; + int irq; + struct work_struct work; +}; + +static void gpio_switch_work(struct work_struct *work) +{ + int state; + struct gpio_switch_data *data = + container_of(work, struct gpio_switch_data, work); + + state = gpio_get_value(data->gpio); + switch_set_state(&data->sdev, state); +} + +static irqreturn_t gpio_irq_handler(int irq, void *dev_id) +{ + struct gpio_switch_data *switch_data = + (struct gpio_switch_data *)dev_id; + + schedule_work(&switch_data->work); + return IRQ_HANDLED; +} + +static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf) +{ + struct gpio_switch_data *switch_data = + container_of(sdev, struct gpio_switch_data, sdev); + const char *state; + if (switch_get_state(sdev)) + state = switch_data->state_on; + else + state = switch_data->state_off; + + if (state) + return sprintf(buf, "%s\n", state); + return -1; +} + +static int gpio_switch_probe(struct platform_device *pdev) +{ + struct gpio_switch_platform_data *pdata = pdev->dev.platform_data; + struct gpio_switch_data *switch_data; + int ret = 0; + + if (!pdata) + return -EBUSY; + + switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL); + if (!switch_data) + return -ENOMEM; + + switch_data->sdev.name = pdata->name; + switch_data->gpio = pdata->gpio; + switch_data->name_on = pdata->name_on; + switch_data->name_off = pdata->name_off; + switch_data->state_on = pdata->state_on; + switch_data->state_off = pdata->state_off; + switch_data->sdev.print_state = switch_gpio_print_state; + + ret = switch_dev_register(&switch_data->sdev); + if (ret < 0) + goto err_switch_dev_register; + + ret = gpio_request(switch_data->gpio, pdev->name); + if (ret < 0) + goto err_request_gpio; + + ret = gpio_direction_input(switch_data->gpio); + if (ret < 0) + goto err_set_gpio_input; + + INIT_WORK(&switch_data->work, gpio_switch_work); + + switch_data->irq = gpio_to_irq(switch_data->gpio); + if (switch_data->irq < 0) { + ret = switch_data->irq; + goto err_detect_irq_num_failed; + } + + ret = 
request_irq(switch_data->irq, gpio_irq_handler, + IRQF_TRIGGER_LOW, pdev->name, switch_data); + if (ret < 0) + goto err_request_irq; + + /* Perform initial detection */ + gpio_switch_work(&switch_data->work); + + return 0; + +err_request_irq: +err_detect_irq_num_failed: +err_set_gpio_input: + gpio_free(switch_data->gpio); +err_request_gpio: + switch_dev_unregister(&switch_data->sdev); +err_switch_dev_register: + kfree(switch_data); + + return ret; +} + +static int __devexit gpio_switch_remove(struct platform_device *pdev) +{ + struct gpio_switch_data *switch_data = platform_get_drvdata(pdev); + + cancel_work_sync(&switch_data->work); + gpio_free(switch_data->gpio); + switch_dev_unregister(&switch_data->sdev); + kfree(switch_data); + + return 0; +} + +static struct platform_driver gpio_switch_driver = { + .probe = gpio_switch_probe, + .remove = __devexit_p(gpio_switch_remove), + .driver = { + .name = "switch-gpio", + .owner = THIS_MODULE, + }, +}; + +static int __init gpio_switch_init(void) +{ + return platform_driver_register(&gpio_switch_driver); +} + +static void __exit gpio_switch_exit(void) +{ + platform_driver_unregister(&gpio_switch_driver); +} + +module_init(gpio_switch_init); +module_exit(gpio_switch_exit); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("GPIO Switch driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index bd1751e37f1d..08da28181280 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -837,6 +837,54 @@ config USB_G_PRINTER For more information, see Documentation/usb/gadget_printer.txt which includes sample code for accessing the device file. +config USB_ANDROID + boolean "Android Gadget" + depends on SWITCH + help + The Android gadget driver supports multiple USB functions. + The functions can be configured via a board file and may be + enabled and disabled dynamically. + +config USB_ANDROID_ACM + boolean "Android gadget ACM serial function" + depends on USB_ANDROID + help + Provides ACM serial function for android gadget driver. + +config USB_ANDROID_ADB + boolean "Android gadget adb function" + depends on USB_ANDROID + help + Provides adb function for android gadget driver. + +config USB_ANDROID_MASS_STORAGE + boolean "Android gadget mass storage function" + depends on USB_ANDROID && SWITCH + help + Provides USB mass storage function for android gadget driver. + +config USB_ANDROID_MTP + boolean "Android MTP function" + depends on USB_ANDROID + help + Provides Media Transfer Protocol (MTP) support for android gadget driver. + +config USB_ANDROID_RNDIS + boolean "Android gadget RNDIS ethernet function" + depends on USB_ANDROID + help + Provides RNDIS ethernet function for android gadget driver. + +config USB_ANDROID_RNDIS_WCEIS + boolean "Use Windows Internet Sharing Class/SubClass/Protocol" + depends on USB_ANDROID_RNDIS + help + Causes the driver to look like a Windows-compatible Internet + Sharing device, so Windows auto-detects it. + + If you enable this option, the device is no longer CDC ethernet + compatible. 
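
The help text for USB_ANDROID above says the gadget's function list is supplied by a board file. A rough sketch of what such a board file might register is shown below, assuming the android_usb_platform_data and android_usb_product layouts implied by android_probe() and get_product_id() in android.c further down; the authoritative definitions live in <linux/usb/android_composite.h>, which is added elsewhere in this patch, and every ID and string here is a placeholder rather than a value from a real board.

    /* Hypothetical board-file fragment, not part of this patch. */
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/usb/android_composite.h>

    static char *usb_functions_adb[] = { "adb" };
    static char *usb_functions_all[] = { "usb_mass_storage", "adb" };

    static struct android_usb_product usb_products[] = {
            {
                    .product_id     = 0x0001,       /* default PID in android.c */
                    .num_functions  = ARRAY_SIZE(usb_functions_adb),
                    .functions      = usb_functions_adb,
            },
    };

    static struct android_usb_platform_data android_usb_pdata = {
            .vendor_id         = 0x18D1,            /* default VID in android.c */
            .product_id        = 0x0001,
            .num_products      = ARRAY_SIZE(usb_products),
            .products          = usb_products,
            .num_functions     = ARRAY_SIZE(usb_functions_all),
            .functions         = usb_functions_all,
            .manufacturer_name = "Example, Inc.",
            .product_name      = "Example Device",
            .serial_number     = "0123456789ABCDEF",
    };

    static struct platform_device android_usb_device = {
            .name   = "android_usb",
            .id     = -1,
            .dev    = { .platform_data = &android_usb_pdata },
    };

    /* Board init code would then call
     *      platform_device_register(&android_usb_device);
     */

In the patch itself, android_probe() copies these fields into the composite driver's device descriptor and string table, and get_product_id() later picks whichever product entry matches the set of currently enabled functions, so the PID seen by the host changes as functions are toggled through their "enable" attributes.
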
+ config USB_CDC_COMPOSITE tristate "CDC Composite Device (Ethernet and ACM)" depends on NET diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index f6f822d2841a..d51e31f5592c 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -67,4 +67,10 @@ obj-$(CONFIG_USB_G_DBGP) += g_dbgp.o obj-$(CONFIG_USB_G_MULTI) += g_multi.o obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o +obj-$(CONFIG_USB_ANDROID) += android.o +obj-$(CONFIG_USB_ANDROID_ACM) += f_acm.o u_serial.o +obj-$(CONFIG_USB_ANDROID_ADB) += f_adb.o +obj-$(CONFIG_USB_ANDROID_MASS_STORAGE) += f_mass_storage.o +obj-$(CONFIG_USB_ANDROID_MTP) += f_mtp.o +obj-$(CONFIG_USB_ANDROID_RNDIS) += f_rndis.o u_ether.o diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c new file mode 100644 index 000000000000..ed4f5739cb9c --- /dev/null +++ b/drivers/usb/gadget/android.c @@ -0,0 +1,417 @@ +/* + * Gadget Driver for Android + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/utsname.h> +#include <linux/platform_device.h> + +#include <linux/usb/android_composite.h> +#include <linux/usb/ch9.h> +#include <linux/usb/composite.h> +#include <linux/usb/gadget.h> + +#include "gadget_chips.h" + +/* + * Kbuild is not very cooperative with respect to linking separately + * compiled library objects into one module. So for now we won't use + * separate compilation ... ensuring init/exit sections work to shrink + * the runtime footprint, and giving us at least some parts of what + * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
+ */ +#include "usbstring.c" +#include "config.c" +#include "epautoconf.c" +#include "composite.c" + +MODULE_AUTHOR("Mike Lockwood"); +MODULE_DESCRIPTION("Android Composite USB Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); + +static const char longname[] = "Gadget Android"; + +/* Default vendor and product IDs, overridden by platform data */ +#define VENDOR_ID 0x18D1 +#define PRODUCT_ID 0x0001 + +struct android_dev { + struct usb_composite_dev *cdev; + struct usb_configuration *config; + int num_products; + struct android_usb_product *products; + int num_functions; + char **functions; + + int product_id; + int version; +}; + +static struct android_dev *_android_dev; + +/* string IDs are assigned dynamically */ + +#define STRING_MANUFACTURER_IDX 0 +#define STRING_PRODUCT_IDX 1 +#define STRING_SERIAL_IDX 2 + +/* String Table */ +static struct usb_string strings_dev[] = { + /* These dummy values should be overridden by platform data */ + [STRING_MANUFACTURER_IDX].s = "Android", + [STRING_PRODUCT_IDX].s = "Android", + [STRING_SERIAL_IDX].s = "0123456789ABCDEF", + { } /* end of list */ +}; + +static struct usb_gadget_strings stringtab_dev = { + .language = 0x0409, /* en-us */ + .strings = strings_dev, +}; + +static struct usb_gadget_strings *dev_strings[] = { + &stringtab_dev, + NULL, +}; + +static struct usb_device_descriptor device_desc = { + .bLength = sizeof(device_desc), + .bDescriptorType = USB_DT_DEVICE, + .bcdUSB = __constant_cpu_to_le16(0x0200), + .bDeviceClass = USB_CLASS_PER_INTERFACE, + .idVendor = __constant_cpu_to_le16(VENDOR_ID), + .idProduct = __constant_cpu_to_le16(PRODUCT_ID), + .bcdDevice = __constant_cpu_to_le16(0xffff), + .bNumConfigurations = 1, +}; + +static struct list_head _functions = LIST_HEAD_INIT(_functions); +static int _registered_function_count = 0; + +static struct android_usb_function *get_function(const char *name) +{ + struct android_usb_function *f; + list_for_each_entry(f, &_functions, list) { + if (!strcmp(name, f->name)) + return f; + } + return 0; +} + +static void bind_functions(struct android_dev *dev) +{ + struct android_usb_function *f; + char **functions = dev->functions; + int i; + + for (i = 0; i < dev->num_functions; i++) { + char *name = *functions++; + f = get_function(name); + if (f) + f->bind_config(dev->config); + else + printk(KERN_ERR "function %s not found in bind_functions\n", name); + } +} + +static int android_bind_config(struct usb_configuration *c) +{ + struct android_dev *dev = _android_dev; + + printk(KERN_DEBUG "android_bind_config\n"); + dev->config = c; + + /* bind our functions if they have all registered */ + if (_registered_function_count == dev->num_functions) + bind_functions(dev); + + return 0; +} + +static int android_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl); + +static struct usb_configuration android_config_driver = { + .label = "android", + .bind = android_bind_config, + .setup = android_setup_config, + .bConfigurationValue = 1, + .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, + .bMaxPower = 0xFA, /* 500ma */ +}; + +static int android_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl) +{ + int i; + int ret = -EOPNOTSUPP; + + for (i = 0; i < android_config_driver.next_interface_id; i++) { + if (android_config_driver.interface[i]->setup) { + ret = android_config_driver.interface[i]->setup( + android_config_driver.interface[i], ctrl); + if (ret >= 0) + return ret; + } + } + return ret; +} + +static int 
product_has_function(struct android_usb_product *p, + struct usb_function *f) +{ + char **functions = p->functions; + int count = p->num_functions; + const char *name = f->name; + int i; + + for (i = 0; i < count; i++) { + if (!strcmp(name, *functions++)) + return 1; + } + return 0; +} + +static int product_matches_functions(struct android_usb_product *p) +{ + struct usb_function *f; + list_for_each_entry(f, &android_config_driver.functions, list) { + if (product_has_function(p, f) == !!f->disabled) + return 0; + } + return 1; +} + +static int get_product_id(struct android_dev *dev) +{ + struct android_usb_product *p = dev->products; + int count = dev->num_products; + int i; + + if (p) { + for (i = 0; i < count; i++, p++) { + if (product_matches_functions(p)) + return p->product_id; + } + } + /* use default product ID */ + return dev->product_id; +} + +static int android_bind(struct usb_composite_dev *cdev) +{ + struct android_dev *dev = _android_dev; + struct usb_gadget *gadget = cdev->gadget; + int gcnum, id, product_id, ret; + + printk(KERN_INFO "android_bind\n"); + + /* Allocate string descriptor numbers ... note that string + * contents can be overridden by the composite_dev glue. + */ + id = usb_string_id(cdev); + if (id < 0) + return id; + strings_dev[STRING_MANUFACTURER_IDX].id = id; + device_desc.iManufacturer = id; + + id = usb_string_id(cdev); + if (id < 0) + return id; + strings_dev[STRING_PRODUCT_IDX].id = id; + device_desc.iProduct = id; + + id = usb_string_id(cdev); + if (id < 0) + return id; + strings_dev[STRING_SERIAL_IDX].id = id; + device_desc.iSerialNumber = id; + + /* register our configuration */ + ret = usb_add_config(cdev, &android_config_driver); + if (ret) { + printk(KERN_ERR "usb_add_config failed\n"); + return ret; + } + + gcnum = usb_gadget_controller_number(gadget); + if (gcnum >= 0) + device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); + else { + /* gadget zero is so simple (for now, no altsettings) that + * it SHOULD NOT have problems with bulk-capable hardware. + * so just warn about unrcognized controllers -- don't panic. + * + * things like configuration and altsetting numbering + * can need hardware-specific attention though. + */ + pr_warning("%s: controller '%s' not recognized\n", + longname, gadget->name); + device_desc.bcdDevice = __constant_cpu_to_le16(0x9999); + } + + usb_gadget_set_selfpowered(gadget); + dev->cdev = cdev; + product_id = get_product_id(dev); + device_desc.idProduct = __constant_cpu_to_le16(product_id); + cdev->desc.idProduct = device_desc.idProduct; + + return 0; +} + +static struct usb_composite_driver android_usb_driver = { + .name = "android_usb", + .dev = &device_desc, + .strings = dev_strings, + .bind = android_bind, + .enable_function = android_enable_function, +}; + +void android_register_function(struct android_usb_function *f) +{ + struct android_dev *dev = _android_dev; + + printk(KERN_INFO "android_register_function %s\n", f->name); + list_add_tail(&f->list, &_functions); + _registered_function_count++; + + /* bind our functions if they have all registered + * and the main driver has bound. 
+ */ + if (dev && dev->config && _registered_function_count == dev->num_functions) + bind_functions(dev); +} + +void android_enable_function(struct usb_function *f, int enable) +{ + struct android_dev *dev = _android_dev; + int disable = !enable; + int product_id; + + if (!!f->disabled != disable) { + usb_function_set_enabled(f, !disable); + +#ifdef CONFIG_USB_ANDROID_RNDIS + if (!strcmp(f->name, "rndis")) { + struct usb_function *func; + + /* We need to specify the COMM class in the device descriptor + * if we are using RNDIS. + */ + if (enable) +#ifdef CONFIG_USB_ANDROID_RNDIS_WCEIS + dev->cdev->desc.bDeviceClass = USB_CLASS_WIRELESS_CONTROLLER; +#else + dev->cdev->desc.bDeviceClass = USB_CLASS_COMM; +#endif + else + dev->cdev->desc.bDeviceClass = USB_CLASS_PER_INTERFACE; + + /* Windows does not support other interfaces when RNDIS is enabled, + * so we disable UMS and MTP when RNDIS is on. + */ + list_for_each_entry(func, &android_config_driver.functions, list) { + if (!strcmp(func->name, "usb_mass_storage") + || !strcmp(func->name, "mtp")) { + usb_function_set_enabled(func, !enable); + } + } + } +#endif + + product_id = get_product_id(dev); + device_desc.idProduct = __constant_cpu_to_le16(product_id); + if (dev->cdev) + dev->cdev->desc.idProduct = device_desc.idProduct; + usb_composite_force_reset(dev->cdev); + } +} + +static int android_probe(struct platform_device *pdev) +{ + struct android_usb_platform_data *pdata = pdev->dev.platform_data; + struct android_dev *dev = _android_dev; + + printk(KERN_INFO "android_probe pdata: %p\n", pdata); + + if (pdata) { + dev->products = pdata->products; + dev->num_products = pdata->num_products; + dev->functions = pdata->functions; + dev->num_functions = pdata->num_functions; + if (pdata->vendor_id) + device_desc.idVendor = + __constant_cpu_to_le16(pdata->vendor_id); + if (pdata->product_id) { + dev->product_id = pdata->product_id; + device_desc.idProduct = + __constant_cpu_to_le16(pdata->product_id); + } + if (pdata->version) + dev->version = pdata->version; + + if (pdata->product_name) + strings_dev[STRING_PRODUCT_IDX].s = pdata->product_name; + if (pdata->manufacturer_name) + strings_dev[STRING_MANUFACTURER_IDX].s = + pdata->manufacturer_name; + if (pdata->serial_number) + strings_dev[STRING_SERIAL_IDX].s = pdata->serial_number; + } + + return usb_composite_register(&android_usb_driver); +} + +static struct platform_driver android_platform_driver = { + .driver = { .name = "android_usb", }, + .probe = android_probe, +}; + +static int __init init(void) +{ + struct android_dev *dev; + + printk(KERN_INFO "android init\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + /* set default values, which should be overridden by platform data */ + dev->product_id = PRODUCT_ID; + _android_dev = dev; + + return platform_driver_register(&android_platform_driver); +} +module_init(init); + +static void __exit cleanup(void) +{ + usb_composite_unregister(&android_usb_driver); + platform_driver_unregister(&android_platform_driver); + kfree(_android_dev); + _android_dev = NULL; +} +module_exit(cleanup); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 1160c55de7f2..840c41e2f33e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -71,6 +71,59 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); /*-------------------------------------------------------------------------*/ +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ 
+ struct usb_function *f = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", !f->disabled); +} + +static ssize_t enable_store( + struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct usb_function *f = dev_get_drvdata(dev); + struct usb_composite_driver *driver = f->config->cdev->driver; + int value; + + sscanf(buf, "%d", &value); + if (driver->enable_function) + driver->enable_function(f, value); + else + usb_function_set_enabled(f, value); + + return size; +} + +static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); + +void usb_function_set_enabled(struct usb_function *f, int enabled) +{ + f->disabled = !enabled; + kobject_uevent(&f->dev->kobj, KOBJ_CHANGE); +} + + +void usb_composite_force_reset(struct usb_composite_dev *cdev) +{ + unsigned long flags; + + spin_lock_irqsave(&cdev->lock, flags); + /* force reenumeration */ + if (cdev && cdev->gadget && + cdev->gadget->speed != USB_SPEED_UNKNOWN) { + /* avoid sending a disconnect switch event until after we disconnect */ + cdev->mute_switch = 1; + spin_unlock_irqrestore(&cdev->lock, flags); + + usb_gadget_disconnect(cdev->gadget); + msleep(10); + usb_gadget_connect(cdev->gadget); + } else { + spin_unlock_irqrestore(&cdev->lock, flags); + } +} + /** * usb_add_function() - add a function to a configuration * @config: the configuration @@ -88,15 +141,30 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); int usb_add_function(struct usb_configuration *config, struct usb_function *function) { + struct usb_composite_dev *cdev = config->cdev; int value = -EINVAL; + int index; - DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", + DBG(cdev, "adding '%s'/%p to config '%s'/%p\n", function->name, function, config->label, config); if (!function->set_alt || !function->disable) goto done; + index = atomic_inc_return(&cdev->driver->function_count); + function->dev = device_create(cdev->driver->class, NULL, + MKDEV(0, index), NULL, function->name); + if (IS_ERR(function->dev)) + return PTR_ERR(function->dev); + + value = device_create_file(function->dev, &dev_attr_enable); + if (value < 0) { + device_destroy(cdev->driver->class, MKDEV(0, index)); + return value; + } + dev_set_drvdata(function->dev, function); + function->config = config; list_add_tail(&function->list, &config->functions); @@ -122,7 +190,7 @@ int usb_add_function(struct usb_configuration *config, done: if (value) - DBG(config->cdev, "adding '%s'/%p --> %d\n", + DBG(cdev, "adding '%s'/%p --> %d\n", function->name, function, value); return value; } @@ -232,17 +300,19 @@ static int config_buf(struct usb_configuration *config, enum usb_device_speed speed, void *buf, u8 type) { struct usb_config_descriptor *c = buf; + struct usb_interface_descriptor *intf; void *next = buf + USB_DT_CONFIG_SIZE; int len = USB_BUFSIZ - USB_DT_CONFIG_SIZE; struct usb_function *f; int status; + int interfaceCount = 0; + u8 *dest; /* write the config descriptor */ c = buf; c->bLength = USB_DT_CONFIG_SIZE; c->bDescriptorType = type; - /* wTotalLength is written later */ - c->bNumInterfaces = config->next_interface_id; + /* wTotalLength and bNumInterfaces are written later */ c->bConfigurationValue = config->bConfigurationValue; c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; @@ -261,23 +331,40 @@ static int config_buf(struct usb_configuration *config, /* add each function's descriptors */ list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; + 
struct usb_descriptor_header *descriptor; if (speed == USB_SPEED_HIGH) descriptors = f->hs_descriptors; else descriptors = f->descriptors; - if (!descriptors) + if (f->disabled || !descriptors || descriptors[0] == NULL) continue; status = usb_descriptor_fillbuf(next, len, (const struct usb_descriptor_header **) descriptors); if (status < 0) return status; + + /* set interface numbers dynamically */ + dest = next; + while ((descriptor = *descriptors++) != NULL) { + intf = (struct usb_interface_descriptor *)dest; + if (intf->bDescriptorType == USB_DT_INTERFACE) { + /* don't increment bInterfaceNumber for alternate settings */ + if (intf->bAlternateSetting == 0) + intf->bInterfaceNumber = interfaceCount++; + else + intf->bInterfaceNumber = interfaceCount - 1; + } + dest += intf->bLength; + } + len -= status; next += status; } len = next - buf; c->wTotalLength = cpu_to_le16(len); + c->bNumInterfaces = interfaceCount; return len; } @@ -424,6 +511,8 @@ static int set_config(struct usb_composite_dev *cdev, if (!f) break; + if (f->disabled) + continue; /* * Record which endpoints are used by the function. This is used @@ -463,6 +552,8 @@ static int set_config(struct usb_composite_dev *cdev, power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW; done: usb_gadget_vbus_draw(gadget, power); + + schedule_work(&cdev->switch_work); return result; } @@ -822,6 +913,21 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) case USB_DT_STRING: value = get_string(cdev, req->buf, w_index, w_value & 0xff); + + /* Allow functions to handle USB_DT_STRING. + * This is required for MTP. + */ + if (value < 0) { + struct usb_configuration *cfg; + list_for_each_entry(cfg, &cdev->configs, list) { + if (cfg && cfg->setup) { + value = cfg->setup(cfg, ctrl); + if (value >= 0) + break; + } + } + } + if (value >= 0) value = min(w_length, (u16) value); break; @@ -847,11 +953,11 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) case USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; - if (cdev->config) + if (cdev->config) { *(u8 *)req->buf = cdev->config->bConfigurationValue; - else + value = min(w_length, (u16) 1); + } else *(u8 *)req->buf = 0; - value = min(w_length, (u16) 1); break; /* function drivers must handle get/set altsetting; if there's @@ -901,6 +1007,9 @@ unknown: */ switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: + if (cdev->config == NULL) + return value; + f = cdev->config->interface[intf]; break; @@ -925,6 +1034,25 @@ unknown: value = c->setup(c, ctrl); } + /* If the vendor request is not processed (value < 0), + * call all device registered configure setup callbacks + * to process it. + * This is used to handle the following cases: + * - vendor request is for the device and arrives before + * setconfiguration. + * - Some devices are required to handle vendor request before + * setconfiguration such as MTP, USBNET. 
+ */ + + if (value < 0) { + struct usb_configuration *cfg; + + list_for_each_entry(cfg, &cdev->configs, list) { + if (cfg && cfg->setup) + value = cfg->setup(cfg, ctrl); + } + } + goto done; } @@ -956,8 +1084,14 @@ static void composite_disconnect(struct usb_gadget *gadget) spin_lock_irqsave(&cdev->lock, flags); if (cdev->config) reset_config(cdev); + if (composite->disconnect) composite->disconnect(cdev); + + if (cdev->mute_switch) + cdev->mute_switch = 0; + else + schedule_work(&cdev->switch_work); spin_unlock_irqrestore(&cdev->lock, flags); } @@ -1019,6 +1153,8 @@ composite_unbind(struct usb_gadget *gadget) kfree(cdev->req->buf); usb_ep_free_request(gadget->ep0, cdev->req); } + + switch_dev_unregister(&cdev->sdev); kfree(cdev); set_gadget_data(gadget, NULL); device_remove_file(&gadget->dev, &dev_attr_suspended); @@ -1047,6 +1183,19 @@ string_override(struct usb_gadget_strings **tab, u8 id, const char *s) } } +static void +composite_switch_work(struct work_struct *data) +{ + struct usb_composite_dev *cdev = + container_of(data, struct usb_composite_dev, switch_work); + struct usb_configuration *config = cdev->config; + + if (config) + switch_set_state(&cdev->sdev, config->bConfigurationValue); + else + switch_set_state(&cdev->sdev, 0); +} + static int composite_bind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; @@ -1098,6 +1247,12 @@ static int composite_bind(struct usb_gadget *gadget) if (status < 0) goto fail; + cdev->sdev.name = "usb_configuration"; + status = switch_dev_register(&cdev->sdev); + if (status < 0) + goto fail; + INIT_WORK(&cdev->switch_work, composite_switch_work); + cdev->desc = *composite->dev; cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; @@ -1172,6 +1327,23 @@ composite_resume(struct usb_gadget *gadget) cdev->suspended = 0; } +static int +composite_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct usb_function *f = dev_get_drvdata(dev); + + if (!f) { + /* this happens when the device is first created */ + return 0; + } + + if (add_uevent_var(env, "FUNCTION=%s", f->name)) + return -ENOMEM; + if (add_uevent_var(env, "ENABLED=%d", !f->disabled)) + return -ENOMEM; + return 0; +} + /*-------------------------------------------------------------------------*/ static struct usb_gadget_driver composite_driver = { @@ -1217,6 +1389,11 @@ int usb_composite_register(struct usb_composite_driver *driver) composite_driver.driver.name = driver->name; composite = driver; + driver->class = class_create(THIS_MODULE, "usb_composite"); + if (IS_ERR(driver->class)) + return PTR_ERR(driver->class); + driver->class->dev_uevent = composite_uevent; + return usb_gadget_register_driver(&composite_driver); } diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index d47a123f15ab..e3e5410d922f 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> +#include <linux/usb/android_composite.h> #include "u_serial.h" #include "gadget_chips.h" @@ -405,10 +406,10 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) usb_ep_disable(acm->notify); } else { VDBG(cdev, "init acm ctrl interface %d\n", intf); - acm->notify_desc = ep_choose(cdev->gadget, - acm->hs.notify, - acm->fs.notify); } + acm->notify_desc = ep_choose(cdev->gadget, + acm->hs.notify, + acm->fs.notify); usb_ep_enable(acm->notify, acm->notify_desc); acm->notify->driver_data = acm; @@ -418,11 +419,11 @@ static int acm_set_alt(struct usb_function *f, 
unsigned intf, unsigned alt) gserial_disconnect(&acm->port); } else { DBG(cdev, "activate acm ttyGS%d\n", acm->port_num); - acm->port.in_desc = ep_choose(cdev->gadget, - acm->hs.in, acm->fs.in); - acm->port.out_desc = ep_choose(cdev->gadget, - acm->hs.out, acm->fs.out); } + acm->port.in_desc = ep_choose(cdev->gadget, + acm->hs.in, acm->fs.in); + acm->port.out_desc = ep_choose(cdev->gadget, + acm->hs.out, acm->fs.out); gserial_connect(&acm->port, acm->port_num); } else @@ -782,3 +783,28 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) kfree(acm); return status; } + +#ifdef CONFIG_USB_ANDROID_ACM + +int acm_function_bind_config(struct usb_configuration *c) +{ + int ret = acm_bind_config(c, 0); + if (ret == 0) + gserial_setup(c->cdev->gadget, 1); + return ret; +} + +static struct android_usb_function acm_function = { + .name = "acm", + .bind_config = acm_function_bind_config, +}; + +static int __init init(void) +{ + printk(KERN_INFO "f_acm init\n"); + android_register_function(&acm_function); + return 0; +} +module_init(init); + +#endif /* CONFIG_USB_ANDROID_ACM */ diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c new file mode 100644 index 000000000000..a0b0774b9556 --- /dev/null +++ b/drivers/usb/gadget/f_adb.c @@ -0,0 +1,655 @@ +/* + * Gadget Driver for Android ADB + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/err.h> +#include <linux/interrupt.h> + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/miscdevice.h> + +#include <linux/usb/android_composite.h> + +#define BULK_BUFFER_SIZE 4096 + +/* number of tx requests to allocate */ +#define TX_REQ_MAX 4 + +static const char shortname[] = "android_adb"; + +struct adb_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + + int online; + int error; + + atomic_t read_excl; + atomic_t write_excl; + atomic_t open_excl; + + struct list_head tx_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req; + int rx_done; +}; + +static struct usb_interface_descriptor adb_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = 0xFF, + .bInterfaceSubClass = 0x42, + .bInterfaceProtocol = 1, +}; + +static struct usb_endpoint_descriptor adb_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor adb_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor adb_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor adb_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_adb_descs[] = { + (struct usb_descriptor_header *) &adb_interface_desc, + (struct usb_descriptor_header *) &adb_fullspeed_in_desc, + (struct usb_descriptor_header *) &adb_fullspeed_out_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_adb_descs[] = { + (struct usb_descriptor_header *) &adb_interface_desc, + (struct usb_descriptor_header *) &adb_highspeed_in_desc, + (struct usb_descriptor_header *) &adb_highspeed_out_desc, + NULL, +}; + + +/* temporary variable used between adb_open() and adb_gadget_bind() */ +static struct adb_dev *_adb_dev; + +static atomic_t adb_enable_excl; + +static inline struct adb_dev *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct adb_dev, function); +} + + +static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void adb_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static inline int _lock(atomic_t *excl) +{ + if (atomic_inc_return(excl) == 1) { + return 0; + } 
else { + atomic_dec(excl); + return -1; + } +} + +static inline void _unlock(atomic_t *excl) +{ + atomic_dec(excl); +} + +/* add a request to the tail of a list */ +void req_put(struct adb_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +struct usb_request *req_get(struct adb_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void adb_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct adb_dev *dev = _adb_dev; + + if (req->status != 0) + dev->error = 1; + + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void adb_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct adb_dev *dev = _adb_dev; + + dev->rx_done = 1; + if (req->status != 0) + dev->error = 1; + + wake_up(&dev->read_wq); +} + +static int __init create_bulk_endpoints(struct adb_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + /* now allocate requests for our endpoints */ + req = adb_request_new(dev->ep_out, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = adb_complete_out; + dev->rx_req = req; + + for (i = 0; i < TX_REQ_MAX; i++) { + req = adb_request_new(dev->ep_in, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = adb_complete_in; + req_put(dev, &dev->tx_idle, req); + } + + return 0; + +fail: + printk(KERN_ERR "adb_bind() could not allocate requests\n"); + return -1; +} + +static ssize_t adb_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct adb_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int r = count, xfer; + int ret; + + DBG(cdev, "adb_read(%d)\n", count); + + if (count > BULK_BUFFER_SIZE) + return -EINVAL; + + if (_lock(&dev->read_excl)) + return -EBUSY; + + /* we will block until we're online */ + while (!(dev->online || dev->error)) { + DBG(cdev, "adb_read: waiting for online state\n"); + ret = wait_event_interruptible(dev->read_wq, + (dev->online || dev->error)); + if (ret < 0) { + _unlock(&dev->read_excl); + return ret; + } + } + if (dev->error) { + r = -EIO; + goto done; + } + +requeue_req: + /* queue a request */ + req = dev->rx_req; + req->length = count; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC); + if (ret < 0) { + DBG(cdev, "adb_read: failed to queue req %p (%d)\n", req, ret); + r 
= -EIO; + dev->error = 1; + goto done; + } else { + DBG(cdev, "rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + dev->error = 1; + r = ret; + goto done; + } + if (!dev->error) { + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) + goto requeue_req; + + DBG(cdev, "rx %p %d\n", req, req->actual); + xfer = (req->actual < count) ? req->actual : count; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + _unlock(&dev->read_excl); + DBG(cdev, "adb_read returning %d\n", r); + return r; +} + +static ssize_t adb_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct adb_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + int r = count, xfer; + int ret; + + DBG(cdev, "adb_write(%d)\n", count); + + if (_lock(&dev->write_excl)) + return -EBUSY; + + while (count > 0) { + if (dev->error) { + DBG(cdev, "adb_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = req_get(dev, &dev->tx_idle)) || dev->error)); + + if (ret < 0) { + r = ret; + break; + } + + if (req != 0) { + if (count > BULK_BUFFER_SIZE) + xfer = BULK_BUFFER_SIZE; + else + xfer = count; + if (copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC); + if (ret < 0) { + DBG(cdev, "adb_write: xfer error %d\n", ret); + dev->error = 1; + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + } + + if (req) + req_put(dev, &dev->tx_idle, req); + + _unlock(&dev->write_excl); + DBG(cdev, "adb_write returning %d\n", r); + return r; +} + +static int adb_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "adb_open\n"); + if (_lock(&_adb_dev->open_excl)) + return -EBUSY; + + fp->private_data = _adb_dev; + + /* clear the error latch */ + _adb_dev->error = 0; + + return 0; +} + +static int adb_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "adb_release\n"); + _unlock(&_adb_dev->open_excl); + return 0; +} + +/* file operations for ADB device /dev/android_adb */ +static struct file_operations adb_fops = { + .owner = THIS_MODULE, + .read = adb_read, + .write = adb_write, + .open = adb_open, + .release = adb_release, +}; + +static struct miscdevice adb_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = shortname, + .fops = &adb_fops, +}; + +static int adb_enable_open(struct inode *ip, struct file *fp) +{ + if (atomic_inc_return(&adb_enable_excl) != 1) { + atomic_dec(&adb_enable_excl); + return -EBUSY; + } + + printk(KERN_INFO "enabling adb\n"); + android_enable_function(&_adb_dev->function, 1); + + return 0; +} + +static int adb_enable_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "disabling adb\n"); + android_enable_function(&_adb_dev->function, 0); + atomic_dec(&adb_enable_excl); + return 0; +} + +static const struct file_operations adb_enable_fops = { + .owner = THIS_MODULE, + .open = adb_enable_open, + .release = adb_enable_release, +}; + +static struct miscdevice adb_enable_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "android_adb_enable", + .fops = &adb_enable_fops, +}; + +static int +adb_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev 
*cdev = c->cdev; + struct adb_dev *dev = func_to_dev(f); + int id; + int ret; + + dev->cdev = cdev; + DBG(cdev, "adb_function_bind dev: %p\n", dev); + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + adb_interface_desc.bInterfaceNumber = id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(dev, &adb_fullspeed_in_desc, + &adb_fullspeed_out_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + adb_highspeed_in_desc.bEndpointAddress = + adb_fullspeed_in_desc.bEndpointAddress; + adb_highspeed_out_desc.bEndpointAddress = + adb_fullspeed_out_desc.bEndpointAddress; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static void +adb_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct adb_dev *dev = func_to_dev(f); + struct usb_request *req; + + spin_lock_irq(&dev->lock); + + adb_request_free(dev->rx_req, dev->ep_out); + while ((req = req_get(dev, &dev->tx_idle))) + adb_request_free(req, dev->ep_in); + + dev->online = 0; + dev->error = 1; + spin_unlock_irq(&dev->lock); + + misc_deregister(&adb_device); + misc_deregister(&adb_enable_device); + kfree(_adb_dev); + _adb_dev = NULL; +} + +static int adb_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct adb_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt); + ret = usb_ep_enable(dev->ep_in, + ep_choose(cdev->gadget, + &adb_highspeed_in_desc, + &adb_fullspeed_in_desc)); + if (ret) + return ret; + ret = usb_ep_enable(dev->ep_out, + ep_choose(cdev->gadget, + &adb_highspeed_out_desc, + &adb_fullspeed_out_desc)); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + dev->online = 1; + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void adb_function_disable(struct usb_function *f) +{ + struct adb_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "adb_function_disable\n"); + dev->online = 0; + dev->error = 1; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int adb_bind_config(struct usb_configuration *c) +{ + struct adb_dev *dev; + int ret; + + printk(KERN_INFO "adb_bind_config\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->lock); + + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + + atomic_set(&dev->open_excl, 0); + atomic_set(&dev->read_excl, 0); + atomic_set(&dev->write_excl, 0); + + INIT_LIST_HEAD(&dev->tx_idle); + + dev->cdev = c->cdev; + dev->function.name = "adb"; + dev->function.descriptors = fs_adb_descs; + dev->function.hs_descriptors = hs_adb_descs; + dev->function.bind = adb_function_bind; + dev->function.unbind = adb_function_unbind; + dev->function.set_alt = adb_function_set_alt; + dev->function.disable = adb_function_disable; + + /* start disabled */ + dev->function.disabled = 1; + + /* _adb_dev must be set before calling usb_gadget_register_driver */ + _adb_dev = dev; + + ret = misc_register(&adb_device); + if (ret) + goto err1; + ret = misc_register(&adb_enable_device); + if (ret) + 
goto err2; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto err3; + + return 0; + +err3: + misc_deregister(&adb_enable_device); +err2: + misc_deregister(&adb_device); +err1: + kfree(dev); + printk(KERN_ERR "adb gadget driver failed to initialize\n"); + return ret; +} + +static struct android_usb_function adb_function = { + .name = "adb", + .bind_config = adb_bind_config, +}; + +static int __init init(void) +{ + printk(KERN_INFO "f_adb init\n"); + android_register_function(&adb_function); + return 0; +} +module_init(init); diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 32cce029f65c..9e37e0863143 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -295,7 +295,12 @@ #include "gadget_chips.h" +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE +#include <linux/usb/android_composite.h> +#include <linux/platform_device.h> +#define FUNCTION_NAME "usb_mass_storage" +#endif /*------------------------------------------------------------------------*/ @@ -424,6 +429,10 @@ struct fsg_config { u16 release; char can_stall; + +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE + struct platform_device *pdev; +#endif }; @@ -2746,7 +2755,13 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, curlun->ro = lcfg->cdrom || lcfg->ro; curlun->removable = lcfg->removable; curlun->dev.release = fsg_lun_release; + +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE + /* use "usb_mass_storage" platform device as parent */ + curlun->dev.parent = &cfg->pdev->dev; +#else curlun->dev.parent = &gadget->dev; +#endif /* curlun->dev.driver = &fsg_driver.driver; XXX */ dev_set_drvdata(&curlun->dev, &common->filesem); dev_set_name(&curlun->dev, @@ -3029,7 +3044,11 @@ static int fsg_bind_config(struct usb_composite_dev *cdev, if (unlikely(!fsg)) return -ENOMEM; +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE + fsg->function.name = FUNCTION_NAME; +#else fsg->function.name = FSG_DRIVER_DESC; +#endif fsg->function.strings = fsg_strings_array; fsg->function.bind = fsg_bind; fsg->function.unbind = fsg_unbind; @@ -3153,3 +3172,63 @@ fsg_common_from_params(struct fsg_common *common, return fsg_common_init(common, cdev, &cfg); } +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE + +static struct fsg_config fsg_cfg; + +static int fsg_probe(struct platform_device *pdev) +{ + struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data; + int i, nluns; + + printk(KERN_INFO "fsg_probe pdev: %p, pdata: %p\n", pdev, pdata); + if (!pdata) + return -1; + + nluns = pdata->nluns; + if (nluns > FSG_MAX_LUNS) + nluns = FSG_MAX_LUNS; + fsg_cfg.nluns = nluns; + for (i = 0; i < nluns; i++) + fsg_cfg.luns[i].removable = 1; + + fsg_cfg.vendor_name = pdata->vendor; + fsg_cfg.product_name = pdata->product; + fsg_cfg.release = pdata->release; + fsg_cfg.can_stall = 0; + fsg_cfg.pdev = pdev; + + return 0; +} + +static struct platform_driver fsg_platform_driver = { + .driver = { .name = FUNCTION_NAME, }, + .probe = fsg_probe, +}; + +int mass_storage_bind_config(struct usb_configuration *c) +{ + struct fsg_common *common = fsg_common_init(NULL, c->cdev, &fsg_cfg); + if (IS_ERR(common)) + return -1; + return fsg_add(c->cdev, c, common); +} + +static struct android_usb_function mass_storage_function = { + .name = FUNCTION_NAME, + .bind_config = mass_storage_bind_config, +}; + +static int __init init(void) +{ + int rc; + printk(KERN_INFO "f_mass_storage init\n"); + rc = platform_driver_register(&fsg_platform_driver); + if (rc != 0) + return rc; + 
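/*
 * fsg_probe() above only copies the board's platform data into fsg_cfg;
 * the backing file for each LUN is attached later through the per-LUN
 * "file" sysfs attribute.  A board enables this function by declaring a
 * "usb_mass_storage" platform device.  Illustrative sketch; everything
 * except the platform_data field names and the device name is an example:
 *
 *	static struct usb_mass_storage_platform_data ms_pdata = {
 *		.nluns   = 1,
 *		.vendor  = "Android",
 *		.product = "Mass Storage",
 *		.release = 0x0100,
 *	};
 *	static struct platform_device ms_device = {
 *		.name = "usb_mass_storage",
 *		.id   = -1,
 *		.dev  = { .platform_data = &ms_pdata },
 *	};
 */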
android_register_function(&mass_storage_function); + return 0; +}module_init(init); + +#endif /* CONFIG_USB_ANDROID_MASS_STORAGE */ + diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c new file mode 100644 index 000000000000..2c61a2613475 --- /dev/null +++ b/drivers/usb/gadget/f_mtp.c @@ -0,0 +1,1273 @@ +/* + * Gadget Function Driver for MTP + * + * Copyright (C) 2010 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kthread.h> +#include <linux/freezer.h> + +#include <linux/types.h> +#include <linux/file.h> +#include <linux/device.h> +#include <linux/miscdevice.h> + +#include <linux/usb.h> +#include <linux/usb_usual.h> +#include <linux/usb/ch9.h> +#include <linux/usb/android_composite.h> +#include <linux/usb/f_mtp.h> + +#define BULK_BUFFER_SIZE 16384 +#define INTR_BUFFER_SIZE 28 + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* values for mtp_dev.state */ +#define STATE_OFFLINE 0 /* initial state, disconnected */ +#define STATE_READY 1 /* ready for userspace calls */ +#define STATE_BUSY 2 /* processing userspace calls */ +#define STATE_CANCELED 3 /* transaction canceled by host */ +#define STATE_ERROR 4 /* error from completion routine */ + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 + +/* IO Thread commands */ +#define ANDROID_THREAD_QUIT 1 +#define ANDROID_THREAD_SEND_FILE 2 +#define ANDROID_THREAD_RECEIVE_FILE 3 + +/* ID for Microsoft MTP OS String */ +#define MTP_OS_STRING_ID 0xEE + +/* MTP class reqeusts */ +#define MTP_REQ_CANCEL 0x64 +#define MTP_REQ_GET_EXT_EVENT_DATA 0x65 +#define MTP_REQ_RESET 0x66 +#define MTP_REQ_GET_DEVICE_STATUS 0x67 + +/* constants for device status */ +#define MTP_RESPONSE_OK 0x2001 +#define MTP_RESPONSE_DEVICE_BUSY 0x2019 + +static const char shortname[] = "mtp_usb"; + +struct mtp_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + /* appear as MTP or PTP when enumerating */ + int interface_mode; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + struct usb_ep *ep_intr; + + int state; + + /* synchronize access to our device file */ + atomic_t open_excl; + + struct list_head tx_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + wait_queue_head_t intr_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + struct usb_request *intr_req; + int rx_done; + + /* synchronize access to interrupt endpoint */ + struct mutex intr_mutex; + /* true if interrupt endpoint is busy */ + int intr_busy; + + /* for our file IO thread */ + struct task_struct *thread; + /* current command for IO thread (or zero for none) */ + int thread_command; + struct file *thread_file; + loff_t thread_file_offset; + size_t thread_file_length; + /* used to wait for thread to complete current command */ + struct completion thread_wait; + 
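/*
 * These fields implement the hand-off to the file IO thread.  From
 * userspace a transfer looks roughly like this (illustrative; the MTP_*
 * ioctl codes and struct mtp_file_range come from the same header the
 * driver includes, linux/usb/f_mtp.h):
 *
 *	struct mtp_file_range mfr = {
 *		.fd = object_fd, .offset = 0, .length = object_len,
 *	};
 *	ioctl(mtp_usb_fd, MTP_SEND_FILE, &mfr);
 *
 * mtp_ioctl() records the file, offset and length in the fields above,
 * sets thread_command, wakes the thread and sleeps on thread_wait while
 * mtp_thread() moves the data between the file and the bulk endpoints
 * with vfs_read()/vfs_write().  MTP_RECEIVE_FILE works the same way in
 * the other direction.
 */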
/* result from current command */ + int thread_result; +}; + +static struct usb_interface_descriptor mtp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_interface_descriptor ptp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 1, +}; + +static struct usb_endpoint_descriptor mtp_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_intr_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE), + .bInterval = 6, +}; + +static struct usb_descriptor_header *fs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *fs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_string mtp_string_defs[] = { + /* Naming interface "MTP" so libmtp will recognize us */ + [INTERFACE_STRING_INDEX].s = "MTP", + { }, /* end of list */ +}; + +static struct usb_gadget_strings mtp_string_table = { + .language = 0x0409, /* en-US */ + .strings = mtp_string_defs, +}; + +static struct usb_gadget_strings *mtp_strings[] = { + &mtp_string_table, + NULL, +}; + +/* Microsoft MTP OS String */ +static u8 
mtp_os_string[] = { + 18, /* sizeof(mtp_os_string) */ + USB_DT_STRING, + /* Signature field: "MSFT100" */ + 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, + /* vendor code */ + 1, + /* padding */ + 0 +}; + +/* Microsoft Extended Configuration Descriptor Header Section */ +struct mtp_ext_config_desc_header { + __le32 dwLength; + __u16 bcdVersion; + __le16 wIndex; + __u8 bCount; + __u8 reserved[7]; +}; + +/* Microsoft Extended Configuration Descriptor Function Section */ +struct mtp_ext_config_desc_function { + __u8 bFirstInterfaceNumber; + __u8 bInterfaceCount; + __u8 compatibleID[8]; + __u8 subCompatibleID[8]; + __u8 reserved[6]; +}; + +/* MTP Extended Configuration Descriptor */ +struct { + struct mtp_ext_config_desc_header header; + struct mtp_ext_config_desc_function function; +} mtp_ext_config_desc = { + .header = { + .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), + .bcdVersion = __constant_cpu_to_le16(0x0100), + .wIndex = __constant_cpu_to_le16(4), + .bCount = __constant_cpu_to_le16(1), + }, + .function = { + .bFirstInterfaceNumber = 0, + .bInterfaceCount = 1, + .compatibleID = { 'M', 'T', 'P' }, + }, +}; + +struct mtp_device_status { + __le16 wLength; + __le16 wCode; +}; + +/* temporary variable used between mtp_open() and mtp_gadget_bind() */ +static struct mtp_dev *_mtp_dev; + +static inline struct mtp_dev *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct mtp_dev, function); +} + +static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static inline int _lock(atomic_t *excl) +{ + if (atomic_inc_return(excl) == 1) { + return 0; + } else { + atomic_dec(excl); + return -1; + } +} + +static inline void _unlock(atomic_t *excl) +{ + atomic_dec(excl); +} + +/* add a request to the tail of a list */ +static void req_put(struct mtp_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request *req_get(struct mtp_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + if (req->status != 0) + dev->state = STATE_ERROR; + + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + dev->rx_done = 1; + if (req->status != 0) + dev->state = STATE_ERROR; + + wake_up(&dev->read_wq); +} + +static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n", req->status, 
req->actual); + dev->intr_busy = 0; + if (req->status != 0) + dev->state = STATE_ERROR; + + wake_up(&dev->intr_wq); +} + +static int __init create_bulk_endpoints(struct mtp_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc, + struct usb_endpoint_descriptor *intr_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, intr_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_intr = ep; + + /* now allocate requests for our endpoints */ + for (i = 0; i < TX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_in, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_in; + req_put(dev, &dev->tx_idle, req); + } + for (i = 0; i < RX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_out, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_out; + dev->rx_req[i] = req; + } + req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_intr; + dev->intr_req = req; + + return 0; + +fail: + printk(KERN_ERR "mtp_bind() could not allocate requests\n"); + return -1; +} + +static ssize_t mtp_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int r = count, xfer; + int ret = 0; + + DBG(cdev, "mtp_read(%d)\n", count); + + if (count > BULK_BUFFER_SIZE) + return -EINVAL; + + /* we will block until we're online */ + DBG(cdev, "mtp_read: waiting for online state\n"); + ret = wait_event_interruptible(dev->read_wq, + dev->state != STATE_OFFLINE); + if (ret < 0) { + r = ret; + goto done; + } + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + +requeue_req: + /* queue a request */ + req = dev->rx_req[0]; + req->length = count; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + goto done; + } else { + DBG(cdev, "rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + r = ret; + goto done; + } + if (dev->state == STATE_BUSY) { + /* If we got a 0-len packet, throw it back and try 
again. */ + if (req->actual == 0) + goto requeue_req; + + DBG(cdev, "rx %p %d\n", req, req->actual); + xfer = (req->actual < count) ? req->actual : count; + r = xfer; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_read returning %d\n", r); + return r; +} + +static ssize_t mtp_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + int r = count, xfer; + int ret; + + DBG(cdev, "mtp_write(%d)\n", count); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + while (count > 0) { + if (dev->state != STATE_BUSY) { + DBG(cdev, "mtp_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY)); + if (!req) { + r = ret; + break; + } + + if (count > BULK_BUFFER_SIZE) + xfer = BULK_BUFFER_SIZE; + else + xfer = count; + if (copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "mtp_write: xfer error %d\n", ret); + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + req_put(dev, &dev->tx_idle, req); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_write returning %d\n", r); + return r; +} + +static int mtp_send_file(struct mtp_dev *dev, struct file *filp, + loff_t offset, size_t count) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + int r = count, xfer, ret; + + DBG(cdev, "mtp_send_file(%lld %d)\n", offset, count); + + while (count > 0) { + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + (req = req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY); + if (!req) { + r = ret; + break; + } + + if (count > BULK_BUFFER_SIZE) + xfer = BULK_BUFFER_SIZE; + else + xfer = count; + ret = vfs_read(filp, req->buf, xfer, &offset); + if (ret < 0) { + r = ret; + break; + } + xfer = ret; + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "mtp_write: xfer error %d\n", ret); + dev->state = STATE_ERROR; + r = -EIO; + break; + } + + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + req_put(dev, &dev->tx_idle, req); + + DBG(cdev, "mtp_write returning %d\n", r); + return r; +} + +static int mtp_receive_file(struct mtp_dev *dev, struct file *filp, + loff_t offset, size_t count) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *read_req = NULL, *write_req = NULL; + int r = count; + int ret; + int cur_buf = 0; + + DBG(cdev, 
"mtp_receive_file(%d)\n", count); + + while (count > 0 || write_req) { + if (count > 0) { + /* queue a request */ + read_req = dev->rx_req[cur_buf]; + cur_buf = (cur_buf + 1) % RX_REQ_MAX; + + read_req->length = (count > BULK_BUFFER_SIZE + ? BULK_BUFFER_SIZE : count); + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + count -= ret; + } + + if (write_req) { + DBG(cdev, "rx %p %d\n", write_req, write_req->actual); + ret = vfs_write(filp, write_req->buf, write_req->actual, + &offset); + DBG(cdev, "vfs_write %d\n", ret); + if (ret != write_req->actual) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + write_req = NULL; + } + + if (read_req) { + /* wait for our last read to complete */ + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (ret < 0 || dev->state != STATE_BUSY) { + r = ret; + break; + } + count -= read_req->actual; + write_req = read_req; + read_req = NULL; + } + } + + DBG(cdev, "mtp_read returning %d\n", r); + return r; +} + +/* Kernel thread for handling file IO operations */ +static int mtp_thread(void *data) +{ + struct mtp_dev *dev = (struct mtp_dev *)data; + struct usb_composite_dev *cdev = dev->cdev; + int flags; + + DBG(cdev, "mtp_thread started\n"); + + while (1) { + /* wait for a command */ + while (1) { + try_to_freeze(); + set_current_state(TASK_INTERRUPTIBLE); + if (dev->thread_command != 0) + break; + schedule(); + } + __set_current_state(TASK_RUNNING); + + if (dev->thread_command == ANDROID_THREAD_QUIT) { + DBG(cdev, "ANDROID_THREAD_QUIT\n"); + dev->thread_result = 0; + goto done; + } + + if (dev->thread_command == ANDROID_THREAD_SEND_FILE) + flags = O_RDONLY | O_LARGEFILE; + else + flags = O_WRONLY | O_LARGEFILE | O_CREAT; + + if (dev->thread_command == ANDROID_THREAD_SEND_FILE) { + dev->thread_result = mtp_send_file(dev, + dev->thread_file, + dev->thread_file_offset, + dev->thread_file_length); + } else { + dev->thread_result = mtp_receive_file(dev, + dev->thread_file, + dev->thread_file_offset, + dev->thread_file_length); + } + + if (dev->thread_file) { + fput(dev->thread_file); + dev->thread_file = NULL; + } + dev->thread_command = 0; + complete(&dev->thread_wait); + } + +done: + DBG(cdev, "android_thread done\n"); + complete_and_exit(&dev->thread_wait, 0); +} + +static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) +{ + struct usb_request *req; + int ret; + int length = event->length; + + DBG(dev->cdev, "mtp_send_event(%d)\n", event->length); + + if (length < 0 || length > INTR_BUFFER_SIZE) + return -EINVAL; + + mutex_lock(&dev->intr_mutex); + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->intr_wq, !dev->intr_busy || dev->state == STATE_OFFLINE); + if (ret < 0) + goto done; + if (dev->state == STATE_OFFLINE) { + ret = -ENODEV; + goto done; + } + req = dev->intr_req; + if (copy_from_user(req->buf, (void __user *)event->data, length)) { + ret = -EFAULT; + goto done; + } + req->length = length; + dev->intr_busy = 1; + ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); + if (ret) + dev->intr_busy = 0; + +done: + mutex_unlock(&dev->intr_mutex); + return ret; +} + +static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) +{ + struct mtp_dev *dev = fp->private_data; + struct file *filp = NULL; + int ret = -EINVAL; + + switch (code) { + case MTP_SEND_FILE: + case MTP_RECEIVE_FILE: + { + struct mtp_file_range mfr; + + spin_lock_irq(&dev->lock); + if 
(dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { + ret = -EFAULT; + goto fail; + } + filp = fget(mfr.fd); + if (!filp) { + ret = -EBADF; + goto fail; + } + + dev->thread_file = filp; + dev->thread_file_offset = mfr.offset; + dev->thread_file_length = mfr.length; + + if (code == MTP_SEND_FILE) + dev->thread_command = ANDROID_THREAD_SEND_FILE; + else + dev->thread_command = ANDROID_THREAD_RECEIVE_FILE; + + /* wake up the thread */ + init_completion(&dev->thread_wait); + wake_up_process(dev->thread); + + /* wait for the thread to complete the command */ + wait_for_completion(&dev->thread_wait); + ret = dev->thread_result; + DBG(dev->cdev, "thread returned %d\n", ret); + break; + } + case MTP_SET_INTERFACE_MODE: + if (value == MTP_INTERFACE_MODE_MTP || + value == MTP_INTERFACE_MODE_PTP) { + dev->interface_mode = value; + if (value == MTP_INTERFACE_MODE_PTP) { + dev->function.descriptors = fs_ptp_descs; + dev->function.hs_descriptors = hs_ptp_descs; + } else { + dev->function.descriptors = fs_mtp_descs; + dev->function.hs_descriptors = hs_mtp_descs; + } + ret = 0; + } + break; + case MTP_SEND_EVENT: + { + struct mtp_event event; + /* return here so we don't change dev->state below, + * which would interfere with bulk transfer state. + */ + if (copy_from_user(&event, (void __user *)value, sizeof(event))) + return -EFAULT; + else + return mtp_send_event(dev, &event); + } + } + +fail: + if (filp) + fput(filp); + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + ret = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + DBG(dev->cdev, "ioctl returning %d\n", ret); + return ret; +} + +static int mtp_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_open\n"); + if (_lock(&_mtp_dev->open_excl)) + return -EBUSY; + + _mtp_dev->thread = kthread_create(mtp_thread, _mtp_dev, "f_mtp"); + if (IS_ERR(_mtp_dev->thread)) + return -ENOMEM; + + /* clear any error condition */ + if (_mtp_dev->state != STATE_OFFLINE) + _mtp_dev->state = STATE_READY; + + fp->private_data = _mtp_dev; + return 0; +} + +static int mtp_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_release\n"); + + /* tell the thread to quit */ + if (_mtp_dev->thread) { + _mtp_dev->thread_command = ANDROID_THREAD_QUIT; + init_completion(&_mtp_dev->thread_wait); + wake_up_process(_mtp_dev->thread); + wait_for_completion(&_mtp_dev->thread_wait); + } + + _unlock(&_mtp_dev->open_excl); + return 0; +} + +/* file operations for /dev/mtp_usb */ +static const struct file_operations mtp_fops = { + .owner = THIS_MODULE, + .read = mtp_read, + .write = mtp_write, + .unlocked_ioctl = mtp_ioctl, + .open = mtp_open, + .release = mtp_release, +}; + +static struct miscdevice mtp_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = shortname, + .fops = &mtp_fops, +}; + +static int +mtp_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct mtp_dev *dev = func_to_dev(f); + int id; + int ret; + + dev->cdev = cdev; + DBG(cdev, "mtp_function_bind dev: %p\n", dev); + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + 
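/*
 * Only mtp_interface_desc is tagged with the allocated id here;
 * ptp_interface_desc (swapped in via MTP_SET_INTERFACE_MODE) keeps its
 * static bInterfaceNumber, which composite.c rewrites anyway while
 * building the configuration descriptor.
 */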
mtp_interface_desc.bInterfaceNumber = id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(dev, &mtp_fullspeed_in_desc, + &mtp_fullspeed_out_desc, &mtp_intr_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + mtp_highspeed_in_desc.bEndpointAddress = + mtp_fullspeed_in_desc.bEndpointAddress; + mtp_highspeed_out_desc.bEndpointAddress = + mtp_fullspeed_out_desc.bEndpointAddress; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static void +mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct mtp_dev *dev = func_to_dev(f); + struct usb_request *req; + int i; + + spin_lock_irq(&dev->lock); + while ((req = req_get(dev, &dev->tx_idle))) + mtp_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + mtp_request_free(dev->rx_req[i], dev->ep_out); + mtp_request_free(dev->intr_req, dev->ep_intr); + dev->state = STATE_OFFLINE; + spin_unlock_irq(&dev->lock); + wake_up(&dev->intr_wq); + + misc_deregister(&mtp_device); + kfree(_mtp_dev); + _mtp_dev = NULL; +} + +static int mtp_function_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct mtp_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + unsigned long flags; + + /* do nothing if we are disabled */ + if (dev->function.disabled) + return value; + + VDBG(cdev, "mtp_function_setup " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + + /* Handle MTP OS string */ + if (dev->interface_mode == MTP_INTERFACE_MODE_MTP + && ctrl->bRequestType == + (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) + && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR + && (w_value >> 8) == USB_DT_STRING + && (w_value & 0xFF) == MTP_OS_STRING_ID) { + value = (w_length < sizeof(mtp_os_string) + ? w_length : sizeof(mtp_os_string)); + memcpy(cdev->req->buf, mtp_os_string, value); + /* return here since composite.c will send for us */ + return value; + } + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { + /* Handle MTP OS descriptor */ + DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (dev->interface_mode == MTP_INTERFACE_MODE_MTP + && ctrl->bRequest == 1 + && (ctrl->bRequestType & USB_DIR_IN) + && (w_index == 4 || w_index == 5)) { + value = (w_length < sizeof(mtp_ext_config_desc) ? + w_length : sizeof(mtp_ext_config_desc)); + memcpy(cdev->req->buf, &mtp_ext_config_desc, value); + } + } + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { + DBG(cdev, "class request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 + && w_value == 0) { + DBG(cdev, "MTP_REQ_CANCEL\n"); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state == STATE_BUSY) { + dev->state = STATE_CANCELED; + wake_up(&dev->read_wq); + wake_up(&dev->write_wq); + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* We need to queue a request to read the remaining + * bytes, but we don't actually need to look at + * the contents. 
+ */ + value = w_length; + } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS + && w_index == 0 && w_value == 0) { + struct mtp_device_status *status = cdev->req->buf; + status->wLength = + __constant_cpu_to_le16(sizeof(*status)); + + DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n"); + spin_lock_irqsave(&dev->lock, flags); + /* device status is "busy" until we report + * the cancelation to userspace + */ + if (dev->state == STATE_BUSY + || dev->state == STATE_CANCELED) + status->wCode = + __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY); + else + status->wCode = + __cpu_to_le16(MTP_RESPONSE_OK); + spin_unlock_irqrestore(&dev->lock, flags); + value = sizeof(*status); + } + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + int rc; + cdev->req->zero = value < w_length; + cdev->req->length = value; + rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (rc < 0) + ERROR(cdev, "%s setup response queue error\n", __func__); + } + + if (value == -EOPNOTSUPP) + VDBG(cdev, + "unknown class-specific control req " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + return value; +} + +static int mtp_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct mtp_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt); + ret = usb_ep_enable(dev->ep_in, + ep_choose(cdev->gadget, + &mtp_highspeed_in_desc, + &mtp_fullspeed_in_desc)); + if (ret) + return ret; + ret = usb_ep_enable(dev->ep_out, + ep_choose(cdev->gadget, + &mtp_highspeed_out_desc, + &mtp_fullspeed_out_desc)); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc); + if (ret) { + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_in); + return ret; + } + dev->state = STATE_READY; + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void mtp_function_disable(struct usb_function *f) +{ + struct mtp_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "mtp_function_disable\n"); + dev->state = STATE_OFFLINE; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_intr); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + wake_up(&dev->intr_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int mtp_bind_config(struct usb_configuration *c) +{ + struct mtp_dev *dev; + int ret; + + printk(KERN_INFO "mtp_bind_config\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + /* allocate a string ID for our interface */ + if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + mtp_string_defs[INTERFACE_STRING_INDEX].id = ret; + mtp_interface_desc.iInterface = ret; + } + + spin_lock_init(&dev->lock); + init_completion(&dev->thread_wait); + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + init_waitqueue_head(&dev->intr_wq); + atomic_set(&dev->open_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + mutex_init(&dev->intr_mutex); + + dev->cdev = c->cdev; + dev->function.name = "mtp"; + dev->function.strings = mtp_strings, + dev->function.descriptors = fs_mtp_descs; + dev->function.hs_descriptors = hs_mtp_descs; + dev->function.bind = mtp_function_bind; + dev->function.unbind = mtp_function_unbind; + 
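/*
 * The setup callback below answers the Microsoft OS string descriptor,
 * the extended compat ID descriptor and the MTP class requests
 * (MTP_REQ_CANCEL, MTP_REQ_GET_DEVICE_STATUS), which hosts may issue
 * before SET_CONFIGURATION.
 */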
dev->function.setup = mtp_function_setup; + dev->function.set_alt = mtp_function_set_alt; + dev->function.disable = mtp_function_disable; + + /* MTP mode by default */ + dev->interface_mode = MTP_INTERFACE_MODE_MTP; + + /* _mtp_dev must be set before calling usb_gadget_register_driver */ + _mtp_dev = dev; + + ret = misc_register(&mtp_device); + if (ret) + goto err1; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto err2; + + return 0; + +err2: + misc_deregister(&mtp_device); +err1: + kfree(dev); + printk(KERN_ERR "mtp gadget driver failed to initialize\n"); + return ret; +} + +static struct android_usb_function mtp_function = { + .name = "mtp", + .bind_config = mtp_bind_config, +}; + +static int __init init(void) +{ + printk(KERN_INFO "f_mtp init\n"); + android_register_function(&mtp_function); + return 0; +} +module_init(init); diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 882484a40398..2d9d97ecce2b 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -26,8 +26,9 @@ #include <linux/slab.h> #include <linux/kernel.h> -#include <linux/device.h> +#include <linux/platform_device.h> #include <linux/etherdevice.h> +#include <linux/usb/android_composite.h> #include <asm/atomic.h> @@ -129,9 +130,16 @@ static struct usb_interface_descriptor rndis_control_intf = { /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, +#ifdef CONFIG_USB_ANDROID_RNDIS_WCEIS + /* "Wireless" RNDIS; auto-detected by Windows */ + .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 3, +#else .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR, +#endif /* .iInterface = DYNAMIC */ }; @@ -304,6 +312,10 @@ static struct usb_gadget_strings *rndis_strings[] = { NULL, }; +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data *rndis_pdata; +#endif + /*-------------------------------------------------------------------------*/ static struct sk_buff *rndis_add_header(struct gether *port, @@ -487,10 +499,10 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) usb_ep_disable(rndis->notify); } else { VDBG(cdev, "init rndis ctrl %d\n", intf); - rndis->notify_desc = ep_choose(cdev->gadget, - rndis->hs.notify, - rndis->fs.notify); } + rndis->notify_desc = ep_choose(cdev->gadget, + rndis->hs.notify, + rndis->fs.notify); usb_ep_enable(rndis->notify, rndis->notify_desc); rndis->notify->driver_data = rndis; @@ -504,11 +516,11 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (!rndis->port.in) { DBG(cdev, "init rndis\n"); - rndis->port.in = ep_choose(cdev->gadget, - rndis->hs.in, rndis->fs.in); - rndis->port.out = ep_choose(cdev->gadget, - rndis->hs.out, rndis->fs.out); } + rndis->port.in = ep_choose(cdev->gadget, + rndis->hs.in, rndis->fs.in); + rndis->port.out = ep_choose(cdev->gadget, + rndis->hs.out, rndis->fs.out); /* Avoid ZLPs; they can be troublesome. 
*/ rndis->port.is_zlp_ok = false; @@ -707,11 +719,12 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); rndis_set_host_mac(rndis->config, rndis->ethaddr); -#if 0 -// FIXME - if (rndis_set_param_vendor(rndis->config, vendorID, - manufacturer)) - goto fail0; +#ifdef CONFIG_USB_ANDROID_RNDIS + if (rndis_pdata) { + if (rndis_set_param_vendor(rndis->config, rndis_pdata->vendorID, + rndis_pdata->vendorDescr)) + goto fail; + } #endif /* NOTE: all that is done without knowing or caring about @@ -850,6 +863,11 @@ rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) rndis->port.func.setup = rndis_setup; rndis->port.func.disable = rndis_disable; +#ifdef CONFIG_USB_ANDROID_RNDIS + /* start disabled */ + rndis->port.func.disabled = 1; +#endif + status = usb_add_function(c, &rndis->port.func); if (status) { kfree(rndis); @@ -858,3 +876,54 @@ fail: } return status; } + +#ifdef CONFIG_USB_ANDROID_RNDIS +#include "rndis.c" + +static int rndis_probe(struct platform_device *pdev) +{ + rndis_pdata = pdev->dev.platform_data; + return 0; +} + +static struct platform_driver rndis_platform_driver = { + .driver = { .name = "rndis", }, + .probe = rndis_probe, +}; + +int rndis_function_bind_config(struct usb_configuration *c) +{ + int ret; + + if (!rndis_pdata) { + printk(KERN_ERR "rndis_pdata null in rndis_function_bind_config\n"); + return -1; + } + + printk(KERN_INFO + "rndis_function_bind_config MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", + rndis_pdata->ethaddr[0], rndis_pdata->ethaddr[1], + rndis_pdata->ethaddr[2], rndis_pdata->ethaddr[3], + rndis_pdata->ethaddr[4], rndis_pdata->ethaddr[5]); + + ret = gether_setup(c->cdev->gadget, rndis_pdata->ethaddr); + if (ret == 0) + ret = rndis_bind_config(c, rndis_pdata->ethaddr); + return ret; +} + +static struct android_usb_function rndis_function = { + .name = "rndis", + .bind_config = rndis_function_bind_config, +}; + +static int __init init(void) +{ + printk(KERN_INFO "f_rndis init\n"); + platform_driver_register(&rndis_platform_driver); + android_register_function(&rndis_function); + return 0; +} +module_init(init); + +#endif /* CONFIG_USB_ANDROID_RNDIS */ diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index 484acfb1a7c5..2771166ac69a 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -805,10 +805,16 @@ static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr, struct rw_semaphore *filesem = dev_get_drvdata(dev); int rc = 0; + +#ifndef CONFIG_USB_ANDROID_MASS_STORAGE + /* disabled in android because we need to allow closing the backing file + * if the media was removed + */ if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) { LDBG(curlun, "eject attempt prevented\n"); return -EBUSY; /* "Door is locked" */ } +#endif /* Remove a trailing newline */ if (count > 0 && buf[count-1] == '\n') diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 6bb876d65252..efdc8817f77d 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -936,7 +936,6 @@ void gether_disconnect(struct gether *link) struct eth_dev *dev = link->ioport; struct usb_request *req; - WARN_ON(!dev); if (!dev) return; diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h index 3c8c0c9f9d72..99e4aa3b1374 100644 --- a/drivers/usb/gadget/u_ether.h +++ b/drivers/usb/gadget/u_ether.h @@ -105,7 +105,7 @@ int geth_bind_config(struct usb_configuration 
*c, u8 ethaddr[ETH_ALEN]); int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); int eem_bind_config(struct usb_configuration *c); -#ifdef USB_ETH_RNDIS +#if defined(USB_ETH_RNDIS) || defined(CONFIG_USB_ANDROID_RNDIS) int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 2839e281cd65..b7b5014ff714 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -517,10 +517,10 @@ static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO); static W1_MASTER_ATTR_RO(attempts, S_IRUGO); static W1_MASTER_ATTR_RO(timeout, S_IRUGO); static W1_MASTER_ATTR_RO(pointer, S_IRUGO); -static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUGO); -static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUGO); -static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUGO); -static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUGO); +static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP); +static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP); +static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP); +static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP); static struct attribute *w1_master_default_attrs[] = { &w1_master_attribute_name.attr, diff --git a/fs/Kconfig b/fs/Kconfig index 3d185308ec88..0082f66128e9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -174,6 +174,10 @@ source "fs/hfsplus/Kconfig" source "fs/befs/Kconfig" source "fs/bfs/Kconfig" source "fs/efs/Kconfig" + +# Patched by YAFFS +source "fs/yaffs2/Kconfig" + source "fs/jffs2/Kconfig" # UBIFS File system configuration source "fs/ubifs/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index e6ec1d309b1d..e87294884144 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -126,3 +126,6 @@ obj-$(CONFIG_BTRFS_FS) += btrfs/ obj-$(CONFIG_GFS2_FS) += gfs2/ obj-$(CONFIG_EXOFS_FS) += exofs/ obj-$(CONFIG_CEPH_FS) += ceph/ + +# Patched by YAFFS +obj-$(CONFIG_YAFFS_FS) += yaffs2/ diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 3817149919cb..c6341501f30c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -77,9 +77,6 @@ /* Maximum number of nesting allowed inside epoll sets */ #define EP_MAX_NESTS 4 -/* Maximum msec timeout value storeable in a long int */ -#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ) - #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) #define EP_UNACTIVE_PTR ((void *) -1L) @@ -1116,18 +1113,22 @@ static int ep_send_events(struct eventpoll *ep, static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { - int res, eavail; + int res, eavail, timed_out = 0; unsigned long flags; - long jtimeout; + long slack; wait_queue_t wait; - - /* - * Calculate the timeout by checking for the "infinite" value (-1) - * and the overflow condition. The passed timeout is in milliseconds, - * that why (t * HZ) / 1000. - */ - jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? - MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; + struct timespec end_time; + ktime_t expires, *to = NULL; + + if (timeout > 0) { + ktime_get_ts(&end_time); + timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC); + slack = select_estimate_accuracy(&end_time); + to = &expires; + *to = timespec_to_ktime(end_time); + } else if (timeout == 0) { + timed_out = 1; + } retry: spin_lock_irqsave(&ep->lock, flags); @@ -1149,7 +1150,7 @@ retry: * to TASK_INTERRUPTIBLE before doing the checks. 
*/ set_current_state(TASK_INTERRUPTIBLE); - if (!list_empty(&ep->rdllist) || !jtimeout) + if (!list_empty(&ep->rdllist) || timed_out) break; if (signal_pending(current)) { res = -EINTR; @@ -1157,7 +1158,9 @@ retry: } spin_unlock_irqrestore(&ep->lock, flags); - jtimeout = schedule_timeout(jtimeout); + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) + timed_out = 1; + spin_lock_irqsave(&ep->lock, flags); } __remove_wait_queue(&ep->wq, &wait); @@ -1175,7 +1178,7 @@ retry: * more luck. */ if (!res && eavail && - !(res = ep_send_events(ep, events, maxevents)) && jtimeout) + !(res = ep_send_events(ep, events, maxevents)) && !timed_out) goto retry; return res; diff --git a/fs/fat/dir.c b/fs/fat/dir.c index ee42b9e0b16a..2242ac239866 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -753,6 +753,13 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp, return ret; } +static int fat_ioctl_volume_id(struct inode *dir) +{ + struct super_block *sb = dir->i_sb; + struct msdos_sb_info *sbi = MSDOS_SB(sb); + return sbi->vol_id; +} + static long fat_dir_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -769,6 +776,8 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd, short_only = 0; both = 1; break; + case VFAT_IOCTL_GET_VOLUME_ID: + return fat_ioctl_volume_id(inode); default: return fat_generic_ioctl(filp, cmd, arg); } diff --git a/fs/fat/fat.h b/fs/fat/fat.h index d75a77f85c28..91cfdda6fb7b 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -78,6 +78,7 @@ struct msdos_sb_info { const void *dir_ops; /* Opaque; default directory operations */ int dir_per_block; /* dir entries per block */ int dir_per_block_bits; /* log2(dir_per_block) */ + unsigned long vol_id; /* volume ID */ int fatent_shift; struct fatent_operations *fatent_ops; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 830058057d33..ab81a7e31157 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1247,6 +1247,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, struct inode *root_inode = NULL, *fat_inode = NULL; struct buffer_head *bh; struct fat_boot_sector *b; + struct fat_boot_bsx *bsx; struct msdos_sb_info *sbi; u16 logical_sector_size; u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors; @@ -1391,6 +1392,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, goto out_fail; } + bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET); + fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data; if (!IS_FSINFO(fsinfo)) { printk(KERN_WARNING "FAT: Invalid FSINFO signature: " @@ -1406,8 +1409,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, } brelse(fsinfo_bh); + } else { + bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET); } + /* interpret volume ID as a little endian 32 bit integer */ + sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) | + ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24)); + sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry); sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5581122bd2c0..72f4d0babc64 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -951,7 +951,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) if ((inode->i_state & flags) == flags) return; - if (unlikely(block_dump)) + if (unlikely(block_dump > 1)) block_dump___mark_inode_dirty(inode); spin_lock(&inode_lock); diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index 
cb5f0a3f1b03..6115cde646fa 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig @@ -249,3 +249,9 @@ config SYSV68_PARTITION partition table format used by Motorola Delta machines (using sysv68). Otherwise, say N. + +config CMDLINE_PARTITION + bool "Kernel command line partition table support" if PARTITION_ADVANCED + help + Say Y here if you would like to pass the partition table for a device + on the kernel command line. diff --git a/fs/partitions/Makefile b/fs/partitions/Makefile index 03af8eac51da..11e3a899559b 100644 --- a/fs/partitions/Makefile +++ b/fs/partitions/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_IBM_PARTITION) += ibm.o obj-$(CONFIG_EFI_PARTITION) += efi.o obj-$(CONFIG_KARMA_PARTITION) += karma.o obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o +obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 79fbf3f390f0..6ffb1ac7de0c 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -38,6 +38,7 @@ #include "efi.h" #include "karma.h" #include "sysv68.h" +#include "cmdline.h" #ifdef CONFIG_BLK_DEV_MD extern void md_autodetect_dev(dev_t dev); @@ -46,6 +47,9 @@ extern void md_autodetect_dev(dev_t dev); int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/ static int (*check_part[])(struct parsed_partitions *) = { +#ifdef CONFIG_CMDLINE_PARTITION + cmdline_partition, +#endif /* * Probe partition formats with tables at disk address 0 * that also have an ADFS boot block at 0xdc0. @@ -286,6 +290,13 @@ ssize_t part_inflight_show(struct device *dev, return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); } +ssize_t part_partition_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%s\n", p->partition_name); +} + #ifdef CONFIG_FAIL_MAKE_REQUEST ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -317,6 +328,8 @@ static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); +static DEVICE_ATTR(partition_name, S_IRUGO, part_partition_name_show, NULL); + #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); @@ -330,6 +343,7 @@ static struct attribute *part_attrs[] = { &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_partition_name.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif @@ -355,10 +369,21 @@ static void part_release(struct device *dev) kfree(p); } +static int part_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct hd_struct *part = dev_to_part(dev); + + add_uevent_var(env, "PARTN=%u", part->partno); + if (part->partition_name) + add_uevent_var(env, "PARTNAME=%s", part->partition_name); + return 0; +} + struct device_type part_type = { .name = "partition", .groups = part_attr_groups, .release = part_release, + .uevent = part_uevent, }; static void delete_partition_rcu_cb(struct rcu_head *head) @@ -400,6 +425,11 @@ static ssize_t whole_disk_show(struct device *dev, static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, whole_disk_show, NULL); +static void name_partition(struct hd_struct *p, const char *name) +{ + strlcpy(p->partition_name, name, GENHD_PART_NAME_SIZE); +} + struct hd_struct *add_partition(struct gendisk *disk, int partno, 
sector_t start, sector_t len, int flags) { @@ -682,6 +712,7 @@ rescan: disk->disk_name, p, -PTR_ERR(part)); continue; } + name_partition(part, state->parts[p].name); #ifdef CONFIG_BLK_DEV_MD if (state->parts[p].flags & ADDPART_FLAG_RAID) md_autodetect_dev(part_to_dev(part)->devt); diff --git a/fs/partitions/check.h b/fs/partitions/check.h index 8e4e103ba216..5ef9da15197b 100644 --- a/fs/partitions/check.h +++ b/fs/partitions/check.h @@ -1,6 +1,8 @@ #include <linux/pagemap.h> #include <linux/blkdev.h> +#define PART_NAME_SIZE 128 + /* * add_gd_partition adds a partitions details to the devices partition * description. @@ -12,6 +14,7 @@ struct parsed_partitions { sector_t from; sector_t size; int flags; + char name[PART_NAME_SIZE]; } parts[DISK_MAX_PARTS]; int next; int limit; @@ -30,7 +33,8 @@ static inline void *read_part_sector(struct parsed_partitions *state, } static inline void -put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) +put_named_partition(struct parsed_partitions *p, int n, sector_t from, + sector_t size, const char *name, size_t name_size) { if (n < p->limit) { char tmp[1 + BDEVNAME_SIZE + 10 + 1]; @@ -39,8 +43,22 @@ put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) p->parts[n].size = size; snprintf(tmp, sizeof(tmp), " %s%d", p->name, n); strlcat(p->pp_buf, tmp, PAGE_SIZE); + if (name) { + if (name_size > PART_NAME_SIZE - 1) + name_size = PART_NAME_SIZE - 1; + memcpy(p->parts[n].name, name, name_size); + p->parts[n].name[name_size] = 0; + snprintf(tmp, sizeof(tmp), " (%s)", p->parts[n].name); + strlcat(p->pp_buf, tmp, PAGE_SIZE); + } } } +static inline void +put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) +{ + put_named_partition(p, n, from, size, NULL, 0); +} + extern int warn_no_part; diff --git a/fs/partitions/cmdline.c b/fs/partitions/cmdline.c new file mode 100644 index 000000000000..7b3a8407e385 --- /dev/null +++ b/fs/partitions/cmdline.c @@ -0,0 +1,194 @@ +/* + * fs/partitions/cmdline.c + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/*#define DEBUG 1*/ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/slab.h> + +#include "check.h" +#include "cmdline.h" + +static char *cmdline; +static int cmdline_parsed; +static struct part_device *cmdline_device; + +struct part { + char *name; + unsigned long from; + unsigned long size; + unsigned long sector_size; + struct part *next_part; +}; + +struct part_device { + char *name; + struct part *first_part; + struct part_device *next_device; +}; + + +/* Passed a string like: + * system:3600:10000:800 + */ +static struct part *parse_partition(char *s, int alloc_size, void **alloc) +{ + char *p; + struct part *this_part; + pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc); + + if (*alloc == NULL) + *alloc = kzalloc(alloc_size, GFP_KERNEL); + + this_part = *alloc; + *alloc += sizeof(*this_part); + + /* Name */ + p = strchr(s, ':'); + if (!p) + return this_part; + *p = 0; + this_part->name = s; + + /* From */ + s = p+1; + p = strchr(s, ':'); + if (!p) + return this_part; + *p = 0; + this_part->from = simple_strtoul(s, NULL, 16); + + /* Size */ + s = p+1; + p = strchr(s, ':'); + if (!p) + return this_part; + *p = 0; + this_part->size = simple_strtoul(s, NULL, 16); + + /* Sector size */ + s = p+1; + this_part->sector_size = simple_strtoul(s, NULL, 16); + pr_debug("%s: Found %s %lu %lu %lu\n", __func__, this_part->name, + this_part->from, this_part->size, this_part->sector_size); + return this_part; +} + +/* Passed a string like: + * system:3600:10000:800,cache:13600:4000:800,userdata:17600:80000:800 + * Could be an empty string + */ +static struct part *parse_partition_list(char *s, int alloc_size, void **alloc) +{ + char *p; + struct part *this_part; + struct part *next_part = NULL; + pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc); + + alloc_size += sizeof(struct part); + p = strchr(s, ','); + if (p) { + *p = 0; + next_part = parse_partition_list(p+1, alloc_size, alloc); + if (!next_part) + BUG(); + } + this_part = parse_partition(s, alloc_size, alloc); + this_part->next_part = next_part; + return this_part; +} +/* Passed a string like: + * sdhci.0=system:3600:10000:800,cache:13600:4000:800,userdata:17600:80000:800 + */ +static struct part_device *parse_device(char *s, int alloc_size, void **alloc) { + char *p; + struct part *part; + struct part_device *device; + + pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc); + p = strchr(s, '='); + if (!p) + return NULL; + *p = 0; + alloc_size += sizeof(struct part_device); + part = parse_partition_list(p+1, alloc_size, alloc); + if (part) { + device = *alloc; + *alloc += sizeof(struct part_device); + device->name = s; + device->first_part = part; + } + return device; +} + + +static void parse_cmdline(void) { + char *s = cmdline; + void *alloc = 0; + if (cmdline_parsed) + return; + cmdline_device = parse_device(s, 0, &alloc); + cmdline_parsed = 1; +} + + +int copy_partitions_to_state(struct part_device *device, + struct parsed_partitions *state, unsigned int ssz) +{ + int i = 0; + struct part *part = device->first_part; + while (part) { + sector_t from = part->from * (part->sector_size / ssz); + sector_t size = part->size * (part->sector_size / ssz); + put_named_partition(state, i+1, from, size, part->name, + strlen(part->name)); + i++; + part = part->next_part; + } + return i; +} + +int cmdline_partition(struct parsed_partitions *state) +{ + struct block_device *bdev = state->bdev; + unsigned int ssz = bdev_logical_block_size(bdev); + 
parse_cmdline(); + pr_debug("%s: %s\n", __func__, dev_name(disk_to_dev(bdev->bd_disk))); + + if (!cmdline_device) + return 0; + + if (strcmp(cmdline_device->name, dev_name(disk_to_dev(bdev->bd_disk)))) + return 0; + + /* We have a command line partition that matches this device */ + copy_partitions_to_state(cmdline_device, state, ssz); + return 1; +} + + +__init static int cmdline_partition_setup(char *s) +{ + cmdline = s; + return 1; +} + +__setup("tegrapart=", cmdline_partition_setup); + + diff --git a/fs/partitions/cmdline.h b/fs/partitions/cmdline.h new file mode 100644 index 000000000000..68dfd2c51b28 --- /dev/null +++ b/fs/partitions/cmdline.h @@ -0,0 +1,23 @@ +/* + * fs/partitions/cmdline.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef FS_PART_CMDLINE_H +#define FS_PART_CMDLINE_H + +extern int cmdline_partition(struct parsed_partitions *state); + +#endif diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c index dbb44d4bb8a7..35bbbd279d73 100644 --- a/fs/partitions/efi.c +++ b/fs/partitions/efi.c @@ -96,6 +96,7 @@ #include <linux/crc32.h> #include <linux/math64.h> #include <linux/slab.h> +#include <linux/nls.h> #include "check.h" #include "efi.h" @@ -617,11 +618,20 @@ int efi_partition(struct parsed_partitions *state) u64 start = le64_to_cpu(ptes[i].starting_lba); u64 size = le64_to_cpu(ptes[i].ending_lba) - le64_to_cpu(ptes[i].starting_lba) + 1ULL; + u8 name[sizeof(ptes->partition_name) / sizeof(efi_char16_t)]; + int len; if (!is_pte_valid(&ptes[i], last_lba(state->bdev))) continue; - put_partition(state, i+1, start * ssz, size * ssz); + len = utf16s_to_utf8s(ptes[i].partition_name, + sizeof(ptes[i].partition_name) / + sizeof(efi_char16_t), + UTF16_LITTLE_ENDIAN, name, + sizeof(name)); + + put_named_partition(state, i+1, start * ssz, size * ssz, + name, len); /* If this is a RAID volume, tell md */ if (!efi_guidcmp(ptes[i].partition_type_guid, diff --git a/fs/proc/base.c b/fs/proc/base.c index a1c43e7c8a7b..a495be632cf7 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -130,6 +130,12 @@ struct pid_entry { NULL, &proc_single_file_operations, \ { .proc_show = show } ) +/* ANDROID is for special files in /proc. */ +#define ANDROID(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG|(MODE)), \ + &proc_##OTYPE##_inode_operations, \ + &proc_##OTYPE##_operations, {}) + /* * Count the number of hardlinks for the pid_entry table, excluding the . * and .. links. 
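/* Worked example (not part of the patch) for the command-line partition
 * format parsed by fs/partitions/cmdline.c above.  The device name and the
 * numbers are the ones used in that file's comments; the 512-byte logical
 * block size is an assumption for illustration only.
 *
 *   tegrapart=sdhci.0=system:3600:10000:800,cache:13600:4000:800,userdata:17600:80000:800
 *
 * parse_device() splits at '=', parse_partition_list() at ',', and
 * parse_partition() at ':' into name:from:size:sector_size (all hex).
 * copy_partitions_to_state() then rescales from/size by sector_size / ssz,
 * where ssz is the device's logical block size.  With sector_size = 0x800
 * (2048) and ssz = 512 the scale factor is 4, so "system" becomes a
 * partition starting at logical block 0xd800 and spanning 0x40000 blocks.
 */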
@@ -231,7 +237,8 @@ struct mm_struct *mm_for_maps(struct task_struct *task) mm = get_task_mm(task); if (mm && mm != current->mm && - !ptrace_may_access(task, PTRACE_MODE_READ)) { + !ptrace_may_access(task, PTRACE_MODE_READ) && + !capable(CAP_SYS_RESOURCE)) { mmput(mm); mm = NULL; } @@ -1071,6 +1078,33 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, return count; } +static int oom_adjust_permission(struct inode *inode, int mask) +{ + uid_t uid; + struct task_struct *p = get_proc_task(inode); + if(p) { + uid = task_uid(p); + put_task_struct(p); + } + + /* + * System Server (uid == 1000) is granted access to oom_adj of all + * android applications (uid > 10000) as and services (uid >= 1000) + */ + if (p && (current_fsuid() == 1000) && (uid >= 1000)) { + if (inode->i_mode >> 6 & mask) { + return 0; + } + } + + /* Fall back to default. */ + return generic_permission(inode, mask, NULL); +} + +static const struct inode_operations proc_oom_adjust_inode_operations = { + .permission = oom_adjust_permission, +}; + static const struct file_operations proc_oom_adjust_operations = { .read = oom_adjust_read, .write = oom_adjust_write, @@ -2724,7 +2758,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif INF("oom_score", S_IRUGO, proc_oom_score), - REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations), + ANDROID("oom_adj",S_IRUGO|S_IWUSR, oom_adjust), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), diff --git a/fs/select.c b/fs/select.c index 500a669f7790..b7b10aa30861 100644 --- a/fs/select.c +++ b/fs/select.c @@ -67,7 +67,7 @@ static long __estimate_accuracy(struct timespec *tv) return slack; } -static long estimate_accuracy(struct timespec *tv) +long select_estimate_accuracy(struct timespec *tv) { unsigned long ret; struct timespec now; @@ -417,7 +417,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) } if (end_time && !timed_out) - slack = estimate_accuracy(end_time); + slack = select_estimate_accuracy(end_time); retval = 0; for (;;) { @@ -769,7 +769,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, } if (end_time && !timed_out) - slack = estimate_accuracy(end_time); + slack = select_estimate_accuracy(end_time); for (;;) { struct poll_list *walk; diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig new file mode 100644 index 000000000000..d4363edcdbfe --- /dev/null +++ b/fs/yaffs2/Kconfig @@ -0,0 +1,180 @@ +# +# YAFFS file system configurations +# + +config YAFFS_FS + tristate "YAFFS2 file system support" + default n + depends on MTD_BLOCK + select YAFFS_YAFFS1 + select YAFFS_YAFFS2 + help + YAFFS2, or Yet Another Flash Filing System, is a filing system + optimised for NAND Flash chips. + + To compile the YAFFS2 file system support as a module, choose M + here: the module will be called yaffs2. + + If unsure, say N. + + Further information on YAFFS2 is available at + <http://www.aleph1.co.uk/yaffs/>. + +config YAFFS_YAFFS1 + bool "512 byte / page devices" + depends on YAFFS_FS + default y + help + Enable YAFFS1 support -- yaffs for 512 byte / page devices + + Not needed for 2K-page devices. + + If unsure, say Y. + +config YAFFS_9BYTE_TAGS + bool "Use older-style on-NAND data format with pageStatus byte" + depends on YAFFS_YAFFS1 + default n + help + + Older-style on-NAND data format has a "pageStatus" byte to record + chunk/page state. 
This byte is zero when the page is discarded. + Choose this option if you have existing on-NAND data using this + format that you need to continue to support. New data written + also uses the older-style format. Note: Use of this option + generally requires that MTD's oob layout be adjusted to use the + older-style format. See notes on tags formats and MTD versions + in yaffs_mtdif1.c. + + If unsure, say N. + +config YAFFS_DOES_ECC + bool "Lets Yaffs do its own ECC" + depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS + default n + help + This enables Yaffs to use its own ECC functions instead of using + the ones from the generic MTD-NAND driver. + + If unsure, say N. + +config YAFFS_ECC_WRONG_ORDER + bool "Use the same ecc byte order as Steven Hill's nand_ecc.c" + depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS + default n + help + This makes yaffs_ecc.c use the same ecc byte order as Steven + Hill's nand_ecc.c. If not set, then you get the same ecc byte + order as SmartMedia. + + If unsure, say N. + +config YAFFS_YAFFS2 + bool "2048 byte (or larger) / page devices" + depends on YAFFS_FS + default y + help + Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices + + If unsure, say Y. + +config YAFFS_AUTO_YAFFS2 + bool "Autoselect yaffs2 format" + depends on YAFFS_YAFFS2 + default y + help + Without this, you need to explicitely use yaffs2 as the file + system type. With this, you can say "yaffs" and yaffs or yaffs2 + will be used depending on the device page size (yaffs on + 512-byte page devices, yaffs2 on 2K page devices). + + If unsure, say Y. + +config YAFFS_DISABLE_TAGS_ECC + bool "Disable YAFFS from doing ECC on tags by default" + depends on YAFFS_FS && YAFFS_YAFFS2 + default n + help + This defaults Yaffs to using its own ECC calculations on tags instead of + just relying on the MTD. + This behavior can also be overridden with tags_ecc_on and + tags_ecc_off mount options. + + If unsure, say N. + + +config YAFFS_DISABLE_WIDE_TNODES + bool "Turn off wide tnodes" + depends on YAFFS_FS + default n + help + Wide tnodes are only used for NAND arrays >=32MB for 512-byte + page devices and >=128MB for 2k page devices. They use slightly + more RAM but are faster since they eliminate chunk group + searching. + + Setting this to 'y' will force tnode width to 16 bits and save + memory but make large arrays slower. + + If unsure, say N. + +config YAFFS_ALWAYS_CHECK_CHUNK_ERASED + bool "Force chunk erase check" + depends on YAFFS_FS + default n + help + Normally YAFFS only checks chunks before writing until an erased + chunk is found. This helps to detect any partially written + chunks that might have happened due to power loss. + + Enabling this forces on the test that chunks are erased in flash + before writing to them. This takes more time but is potentially + a bit more secure. + + Suggest setting Y during development and ironing out driver + issues etc. Suggest setting to N if you want faster writing. + + If unsure, say Y. + +config YAFFS_SHORT_NAMES_IN_RAM + bool "Cache short names in RAM" + depends on YAFFS_FS + default y + help + If this config is set, then short names are stored with the + yaffs_Object. This costs an extra 16 bytes of RAM per object, + but makes look-ups faster. + + If unsure, say Y. + +config YAFFS_EMPTY_LOST_AND_FOUND + bool "Empty lost and found on boot" + depends on YAFFS_FS + default n + help + If this is enabled then the contents of lost and found is + automatically dumped at mount. + + If unsure, say N. 
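# Sketch of the .config fragment these options produce when YAFFS2 support is
# enabled and everything else is left at the defaults above (illustrative
# only; derived from the "default" lines in this Kconfig):
CONFIG_YAFFS_FS=y
CONFIG_YAFFS_YAFFS1=y
# CONFIG_YAFFS_9BYTE_TAGS is not set
# CONFIG_YAFFS_DOES_ECC is not set
CONFIG_YAFFS_YAFFS2=y
CONFIG_YAFFS_AUTO_YAFFS2=y
# CONFIG_YAFFS_DISABLE_TAGS_ECC is not set
# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set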
+ +config YAFFS_DISABLE_BLOCK_REFRESHING + bool "Disable yaffs2 block refreshing" + depends on YAFFS_FS + default n + help + If this is set, then block refreshing is disabled. + Block refreshing infrequently refreshes the oldest block in + a yaffs2 file system. This mechanism helps to refresh flash to + mitigate against data loss. This is particularly useful for MLC. + + If unsure, say N. + +config YAFFS_DISABLE_BACKGROUND + bool "Disable yaffs2 background processing" + depends on YAFFS_FS + default n + help + If this is set, then background processing is disabled. + Background processing makes many foreground activities faster. + + If unsure, say N. diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile new file mode 100644 index 000000000000..382ee6142cf6 --- /dev/null +++ b/fs/yaffs2/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the linux YAFFS filesystem routines. +# + +obj-$(CONFIG_YAFFS_FS) += yaffs.o + +yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o +yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o +yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o +yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o diff --git a/fs/yaffs2/devextras.h b/fs/yaffs2/devextras.h new file mode 100644 index 000000000000..215caa50f178 --- /dev/null +++ b/fs/yaffs2/devextras.h @@ -0,0 +1,196 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +/* + * This file just holds extra declarations of macros that would normally + * be provided in the Linux kernel. These macros have been written from + * scratch but are functionally equivalent to the Linux ones. + * + */ + +#ifndef __EXTRAS_H__ +#define __EXTRAS_H__ + + +#if !(defined __KERNEL__) + +/* Definition of types */ +typedef unsigned char __u8; +typedef unsigned short __u16; +typedef unsigned __u32; + +#endif + +/* + * This is a simple doubly linked list implementation that matches the + * way the Linux kernel doubly linked list implementation works.
+ */ + +struct ylist_head { + struct ylist_head *next; /* next in chain */ + struct ylist_head *prev; /* previous in chain */ +}; + + +/* Initialise a static list */ +#define YLIST_HEAD(name) \ +struct ylist_head name = { &(name), &(name)} + + + +/* Initialise a list head to an empty list */ +#define YINIT_LIST_HEAD(p) \ +do { \ + (p)->next = (p);\ + (p)->prev = (p); \ +} while (0) + + +/* Add an element to a list */ +static __inline__ void ylist_add(struct ylist_head *newEntry, + struct ylist_head *list) +{ + struct ylist_head *listNext = list->next; + + list->next = newEntry; + newEntry->prev = list; + newEntry->next = listNext; + listNext->prev = newEntry; + +} + +static __inline__ void ylist_add_tail(struct ylist_head *newEntry, + struct ylist_head *list) +{ + struct ylist_head *listPrev = list->prev; + + list->prev = newEntry; + newEntry->next = list; + newEntry->prev = listPrev; + listPrev->next = newEntry; + +} + + +/* Take an element out of its current list, with or without + * reinitialising the links of the entry */ +static __inline__ void ylist_del(struct ylist_head *entry) +{ + struct ylist_head *listNext = entry->next; + struct ylist_head *listPrev = entry->prev; + + listNext->prev = listPrev; + listPrev->next = listNext; + +} + +static __inline__ void ylist_del_init(struct ylist_head *entry) +{ + ylist_del(entry); + entry->next = entry->prev = entry; +} + + +/* Test if the list is empty */ +static __inline__ int ylist_empty(struct ylist_head *entry) +{ + return (entry->next == entry); +} + + +/* ylist_entry takes a pointer to a list entry and offsets it so that + * we can find a pointer to the object it is embedded in. + */ + + +#define ylist_entry(entry, type, member) \ + ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member))) + + +/* ylist_for_each and ylist_for_each_safe iterate over lists. + * ylist_for_each_safe uses temporary storage to make the list delete safe + */ + +#define ylist_for_each(itervar, list) \ + for (itervar = (list)->next; itervar != (list); itervar = itervar->next) + +#define ylist_for_each_safe(itervar, saveVar, list) \ + for (itervar = (list)->next, saveVar = (list)->next->next; \ + itervar != (list); itervar = saveVar, saveVar = saveVar->next) + + +#if !(defined __KERNEL__) + + +#ifndef WIN32 +#include <sys/stat.h> +#endif + + +#ifdef CONFIG_YAFFS_PROVIDE_DEFS +/* File types */ + + +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + + +#ifndef WIN32 +#include <sys/stat.h> +#endif + +/* + * Attribute flags. These should be or-ed together to figure out what + * has been changed! + */ +#define ATTR_MODE 1 +#define ATTR_UID 2 +#define ATTR_GID 4 +#define ATTR_SIZE 8 +#define ATTR_ATIME 16 +#define ATTR_MTIME 32 +#define ATTR_CTIME 64 + +struct iattr { + unsigned int ia_valid; + unsigned ia_mode; + unsigned ia_uid; + unsigned ia_gid; + unsigned ia_size; + unsigned ia_atime; + unsigned ia_mtime; + unsigned ia_ctime; + unsigned int ia_attr_flags; +}; + +#endif + +#else + +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/stat.h> + +#endif + + +#endif diff --git a/fs/yaffs2/moduleconfig.h b/fs/yaffs2/moduleconfig.h new file mode 100644 index 000000000000..e8efd67586a7 --- /dev/null +++ b/fs/yaffs2/moduleconfig.h @@ -0,0 +1,82 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Martin Fouts <Martin.Fouts@palmsource.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_CONFIG_H__ +#define __YAFFS_CONFIG_H__ + +#ifdef YAFFS_OUT_OF_TREE + +/* DO NOT UNSET THESE THREE. YAFFS2 will not compile if you do. */ +#define CONFIG_YAFFS_FS +#define CONFIG_YAFFS_YAFFS1 +#define CONFIG_YAFFS_YAFFS2 + +/* These options are independent of each other. Select those that matter. */ + +/* Default: Not selected */ +/* Meaning: Yaffs does its own ECC, rather than using MTD ECC */ +/* #define CONFIG_YAFFS_DOES_ECC */ + +/* Default: Selected */ +/* Meaning: Yaffs does its own ECC on tags for packed tags rather than use mtd */ +#define CONFIG_YAFFS_DOES_TAGS_ECC + +/* Default: Not selected */ +/* Meaning: ECC byte order is 'wrong'. Only meaningful if */ +/* CONFIG_YAFFS_DOES_ECC is set */ +/* #define CONFIG_YAFFS_ECC_WRONG_ORDER */ + +/* Default: Not selected */ +/* Meaning: Always test whether chunks are erased before writing to them. + Use during mtd debugging and init. */ +/* #define CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED */ + +/* Default: Not Selected */ +/* Meaning: At mount automatically empty all files from lost and found. */ +/* This is done to fix an old problem where rmdir was not checking for an */ +/* empty directory. This can also be achieved with a mount option. */ +#define CONFIG_YAFFS_EMPTY_LOST_AND_FOUND + +/* Default: Selected */ +/* Meaning: Cache short names, taking more RAM, but faster look-ups */ +#define CONFIG_YAFFS_SHORT_NAMES_IN_RAM + +/* Default: Unselected */ +/* Meaning: Select to disable block refreshing. */ +/* Block Refreshing periodically rewrites the oldest block. */ +/* #define CONFIG_DISABLE_BLOCK_REFRESHING */ + +/* Default: Unselected */ +/* Meaning: Select to disable background processing */ +/* #define CONFIG_DISABLE_BACKGROUND */ + + +/* +Older-style on-NAND data format has a "pageStatus" byte to record +chunk/page state. This byte is zeroed when the page is discarded. +Choose this option if you have existing on-NAND data in this format +that you need to continue to support. New data written also uses the +older-style format. +Note: Use of this option generally requires that MTD's oob layout be +adjusted to use the older-style format. See notes on tags formats and +MTD versions in yaffs_mtdif1.c. +*/ +/* Default: Not selected */ +/* Meaning: Use older-style on-NAND data format with pageStatus byte */ +/* #define CONFIG_YAFFS_9BYTE_TAGS */ + +#endif /* YAFFS_OUT_OF_TREE */ + +#endif /* __YAFFS_CONFIG_H__ */ diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c new file mode 100644 index 000000000000..1304c19c185a --- /dev/null +++ b/fs/yaffs2/yaffs_checkptrw.c @@ -0,0 +1,401 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "yaffs_checkptrw.h" +#include "yaffs_getblockinfo.h" + +static int yaffs_CheckpointSpaceOk(yaffs_Device *dev) +{ + int blocksAvailable = dev->nErasedBlocks - dev->param.nReservedBlocks; + + T(YAFFS_TRACE_CHECKPOINT, + (TSTR("checkpt blocks available = %d" TENDSTR), + blocksAvailable)); + + return (blocksAvailable <= 0) ? 0 : 1; +} + + +static int yaffs_CheckpointErase(yaffs_Device *dev) +{ + int i; + + if (!dev->param.eraseBlockInNAND) + return 0; + T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR), + dev->internalStartBlock, dev->internalEndBlock)); + + for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) { + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i); + if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i)); + + dev->nBlockErasures++; + + if (dev->param.eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) { + bi->blockState = YAFFS_BLOCK_STATE_EMPTY; + dev->nErasedBlocks++; + dev->nFreeChunks += dev->param.nChunksPerBlock; + } else { + dev->param.markNANDBlockBad(dev, i); + bi->blockState = YAFFS_BLOCK_STATE_DEAD; + } + } + } + + dev->blocksInCheckpoint = 0; + + return 1; +} + + +static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev) +{ + int i; + int blocksAvailable = dev->nErasedBlocks - dev->param.nReservedBlocks; + T(YAFFS_TRACE_CHECKPOINT, + (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR), + dev->nErasedBlocks, dev->param.nReservedBlocks, blocksAvailable, dev->checkpointNextBlock)); + + if (dev->checkpointNextBlock >= 0 && + dev->checkpointNextBlock <= dev->internalEndBlock && + blocksAvailable > 0) { + + for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) { + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i); + if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) { + dev->checkpointNextBlock = i + 1; + dev->checkpointCurrentBlock = i; + T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i)); + return; + } + } + } + T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR))); + + dev->checkpointNextBlock = -1; + dev->checkpointCurrentBlock = -1; +} + +static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev) +{ + int i; + yaffs_ExtendedTags tags; + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR), + dev->blocksInCheckpoint, dev->checkpointNextBlock)); + + if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks) + for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) { + int chunk = i * dev->param.nChunksPerBlock; + int realignedChunk = chunk - dev->chunkOffset; + + dev->param.readChunkWithTagsFromNAND(dev, realignedChunk, + NULL, &tags); + T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR), + i, tags.objectId, tags.sequenceNumber, tags.eccResult)); + + if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) { + /* Right kind of block */ + dev->checkpointNextBlock = tags.objectId; + dev->checkpointCurrentBlock = i; + dev->checkpointBlockList[dev->blocksInCheckpoint] = i; + dev->blocksInCheckpoint++; + T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i)); + return; + } + } + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR))); + + dev->checkpointNextBlock = -1; + dev->checkpointCurrentBlock = -1; +} + + +int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting) +{ + + + dev->checkpointOpenForWrite = 
forWriting; + + /* Got the functions we need? */ + if (!dev->param.writeChunkWithTagsToNAND || + !dev->param.readChunkWithTagsFromNAND || + !dev->param.eraseBlockInNAND || + !dev->param.markNANDBlockBad) + return 0; + + if (forWriting && !yaffs_CheckpointSpaceOk(dev)) + return 0; + + if (!dev->checkpointBuffer) + dev->checkpointBuffer = YMALLOC_DMA(dev->param.totalBytesPerChunk); + if (!dev->checkpointBuffer) + return 0; + + + dev->checkpointPageSequence = 0; + dev->checkpointByteCount = 0; + dev->checkpointSum = 0; + dev->checkpointXor = 0; + dev->checkpointCurrentBlock = -1; + dev->checkpointCurrentChunk = -1; + dev->checkpointNextBlock = dev->internalStartBlock; + + /* Erase all the blocks in the checkpoint area */ + if (forWriting) { + memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk); + dev->checkpointByteOffset = 0; + return yaffs_CheckpointErase(dev); + } else { + int i; + /* Set to a value that will kick off a read */ + dev->checkpointByteOffset = dev->nDataBytesPerChunk; + /* A checkpoint block list of 1 checkpoint block per 16 block is (hopefully) + * going to be way more than we need */ + dev->blocksInCheckpoint = 0; + dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2; + dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks); + if(!dev->checkpointBlockList) + return 0; + + for (i = 0; i < dev->checkpointMaxBlocks; i++) + dev->checkpointBlockList[i] = -1; + } + + return 1; +} + +int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum) +{ + __u32 compositeSum; + compositeSum = (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF); + *sum = compositeSum; + return 1; +} + +static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev) +{ + int chunk; + int realignedChunk; + + yaffs_ExtendedTags tags; + + if (dev->checkpointCurrentBlock < 0) { + yaffs_CheckpointFindNextErasedBlock(dev); + dev->checkpointCurrentChunk = 0; + } + + if (dev->checkpointCurrentBlock < 0) + return 0; + + tags.chunkDeleted = 0; + tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */ + tags.chunkId = dev->checkpointPageSequence + 1; + tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA; + tags.byteCount = dev->nDataBytesPerChunk; + if (dev->checkpointCurrentChunk == 0) { + /* First chunk we write for the block? 
Set block state to + checkpoint */ + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock); + bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT; + dev->blocksInCheckpoint++; + } + + chunk = dev->checkpointCurrentBlock * dev->param.nChunksPerBlock + dev->checkpointCurrentChunk; + + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR), + chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId)); + + realignedChunk = chunk - dev->chunkOffset; + + dev->nPageWrites++; + + dev->param.writeChunkWithTagsToNAND(dev, realignedChunk, + dev->checkpointBuffer, &tags); + dev->checkpointByteOffset = 0; + dev->checkpointPageSequence++; + dev->checkpointCurrentChunk++; + if (dev->checkpointCurrentChunk >= dev->param.nChunksPerBlock) { + dev->checkpointCurrentChunk = 0; + dev->checkpointCurrentBlock = -1; + } + memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk); + + return 1; +} + + +int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes) +{ + int i = 0; + int ok = 1; + + + __u8 * dataBytes = (__u8 *)data; + + + + if (!dev->checkpointBuffer) + return 0; + + if (!dev->checkpointOpenForWrite) + return -1; + + while (i < nBytes && ok) { + dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes; + dev->checkpointSum += *dataBytes; + dev->checkpointXor ^= *dataBytes; + + dev->checkpointByteOffset++; + i++; + dataBytes++; + dev->checkpointByteCount++; + + + if (dev->checkpointByteOffset < 0 || + dev->checkpointByteOffset >= dev->nDataBytesPerChunk) + ok = yaffs_CheckpointFlushBuffer(dev); + } + + return i; +} + +int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes) +{ + int i = 0; + int ok = 1; + yaffs_ExtendedTags tags; + + + int chunk; + int realignedChunk; + + __u8 *dataBytes = (__u8 *)data; + + if (!dev->checkpointBuffer) + return 0; + + if (dev->checkpointOpenForWrite) + return -1; + + while (i < nBytes && ok) { + + + if (dev->checkpointByteOffset < 0 || + dev->checkpointByteOffset >= dev->nDataBytesPerChunk) { + + if (dev->checkpointCurrentBlock < 0) { + yaffs_CheckpointFindNextCheckpointBlock(dev); + dev->checkpointCurrentChunk = 0; + } + + if (dev->checkpointCurrentBlock < 0) + ok = 0; + else { + chunk = dev->checkpointCurrentBlock * + dev->param.nChunksPerBlock + + dev->checkpointCurrentChunk; + + realignedChunk = chunk - dev->chunkOffset; + + dev->nPageReads++; + + /* read in the next chunk */ + /* printf("read checkpoint page %d\n",dev->checkpointPage); */ + dev->param.readChunkWithTagsFromNAND(dev, + realignedChunk, + dev->checkpointBuffer, + &tags); + + if (tags.chunkId != (dev->checkpointPageSequence + 1) || + tags.eccResult > YAFFS_ECC_RESULT_FIXED || + tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA) + ok = 0; + + dev->checkpointByteOffset = 0; + dev->checkpointPageSequence++; + dev->checkpointCurrentChunk++; + + if (dev->checkpointCurrentChunk >= dev->param.nChunksPerBlock) + dev->checkpointCurrentBlock = -1; + } + } + + if (ok) { + *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset]; + dev->checkpointSum += *dataBytes; + dev->checkpointXor ^= *dataBytes; + dev->checkpointByteOffset++; + i++; + dataBytes++; + dev->checkpointByteCount++; + } + } + + return i; +} + +int yaffs_CheckpointClose(yaffs_Device *dev) +{ + + if (dev->checkpointOpenForWrite) { + if (dev->checkpointByteOffset != 0) + yaffs_CheckpointFlushBuffer(dev); + } else if(dev->checkpointBlockList){ + int i; + for (i = 0; i < dev->blocksInCheckpoint && 
dev->checkpointBlockList[i] >= 0; i++) { + int blk = dev->checkpointBlockList[i]; + yaffs_BlockInfo *bi = NULL; + if( dev->internalStartBlock <= blk && blk <= dev->internalEndBlock) + bi = yaffs_GetBlockInfo(dev, blk); + if (bi && bi->blockState == YAFFS_BLOCK_STATE_EMPTY) + bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT; + else { + /* Todo this looks odd... */ + } + } + YFREE(dev->checkpointBlockList); + dev->checkpointBlockList = NULL; + } + + dev->nFreeChunks -= dev->blocksInCheckpoint * dev->param.nChunksPerBlock; + dev->nErasedBlocks -= dev->blocksInCheckpoint; + + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR), + dev->checkpointByteCount)); + + if (dev->checkpointBuffer) { + /* free the buffer */ + YFREE(dev->checkpointBuffer); + dev->checkpointBuffer = NULL; + return 1; + } else + return 0; +} + +int yaffs_CheckpointInvalidateStream(yaffs_Device *dev) +{ + /* Erase the checkpoint data */ + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate of %d blocks"TENDSTR), + dev->blocksInCheckpoint)); + + return yaffs_CheckpointErase(dev); +} + + + diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h new file mode 100644 index 000000000000..881c3659c8a9 --- /dev/null +++ b/fs/yaffs2/yaffs_checkptrw.h @@ -0,0 +1,34 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_CHECKPTRW_H__ +#define __YAFFS_CHECKPTRW_H__ + +#include "yaffs_guts.h" + +int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting); + +int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes); + +int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes); + +int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum); + +int yaffs_CheckpointClose(yaffs_Device *dev); + +int yaffs_CheckpointInvalidateStream(yaffs_Device *dev); + + +#endif diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c new file mode 100644 index 000000000000..da6145aac27a --- /dev/null +++ b/fs/yaffs2/yaffs_ecc.c @@ -0,0 +1,323 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * This code implements the ECC algorithm used in SmartMedia. + * + * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. + * The two unused bit are set to 1. + * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC + * blocks are used on a 512-byte NAND page. + * + */ + +/* Table generated by gen-ecc.c + * Using a table means we do not have to calculate p1..p4 and p1'..p4' + * for each byte of data. These are instead provided in a table in bits7..2. 
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore + * this bytes influence on the line parity. + */ + +#include "yportenv.h" + +#include "yaffs_ecc.h" + +static const unsigned char column_parity_table[] = { + 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69, + 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00, + 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc, + 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95, + 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0, + 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99, + 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65, + 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c, + 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc, + 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5, + 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59, + 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30, + 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55, + 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c, + 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0, + 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9, + 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0, + 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9, + 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55, + 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c, + 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59, + 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30, + 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc, + 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5, + 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65, + 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c, + 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0, + 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99, + 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc, + 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95, + 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69, + 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00, +}; + +/* Count the bits in an unsigned char or a U32 */ + +static int yaffs_CountBits(unsigned char x) +{ + int r = 0; + while (x) { + if (x & 1) + r++; + x >>= 1; + } + return r; +} + +static int yaffs_CountBits32(unsigned x) +{ + int r = 0; + while (x) { + if (x & 1) + r++; + x >>= 1; + } + return r; +} + +/* Calculate the ECC for a 256-byte block of data */ +void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc) +{ + unsigned int i; + + unsigned char col_parity = 0; + unsigned char line_parity = 0; + unsigned char line_parity_prime = 0; + unsigned char t; + unsigned char b; + + for (i = 0; i < 256; i++) { + b = column_parity_table[*data++]; + col_parity ^= b; + + if (b & 0x01) { /* odd number of bits in the byte */ + line_parity ^= i; + line_parity_prime ^= ~i; + } + } + + ecc[2] = (~col_parity) | 0x03; + + t = 0; + if (line_parity & 0x80) + t |= 0x80; + if (line_parity_prime & 0x80) + t |= 0x40; + if (line_parity & 0x40) + t |= 0x20; + if (line_parity_prime & 0x40) + t |= 0x10; + if (line_parity & 0x20) + t |= 0x08; + if (line_parity_prime & 0x20) + t |= 0x04; + if (line_parity & 0x10) + t |= 0x02; + if (line_parity_prime & 0x10) + t |= 0x01; + ecc[1] = ~t; + + t = 0; + if (line_parity & 0x08) + t |= 0x80; + if (line_parity_prime & 0x08) + t |= 0x40; + if (line_parity & 0x04) + t |= 0x20; + if (line_parity_prime & 0x04) + t |= 0x10; + if (line_parity & 0x02) + t |= 0x08; + if (line_parity_prime & 0x02) + t |= 0x04; + if (line_parity & 0x01) + t |= 0x02; + if (line_parity_prime & 0x01) + t |= 0x01; + ecc[0] = ~t; + +#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER + /* Swap the bytes into the wrong order */ + t = ecc[0]; + ecc[0] = ecc[1]; + ecc[1] = t; +#endif +} + + 
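/* Minimal, hypothetical self-test (not part of the patch) showing how the
 * routines above fit together: compute the ECC for a 256-byte buffer, flip
 * one bit to simulate a NAND read error, and let yaffs_ECCCorrect() repair
 * it.  The fill pattern and the flipped bit are arbitrary; in the real
 * driver the "stored" ECC would come from the page's OOB area.
 */
#include <string.h>
#include "yaffs_ecc.h"

static int yaffs_ecc_selftest(void)
{
	unsigned char data[256];
	unsigned char stored_ecc[3];	/* ECC computed when the page was written */
	unsigned char calc_ecc[3];	/* ECC recomputed from the data just read */

	memset(data, 0xa5, sizeof(data));
	yaffs_ECCCalculate(data, stored_ecc);

	data[17] ^= 0x04;		/* simulate a single-bit flip in byte 17 */
	yaffs_ECCCalculate(data, calc_ecc);

	/* Returns 0 = no error, 1 = corrected (data[17] restored), -1 = unrecoverable */
	return yaffs_ECCCorrect(data, stored_ecc, calc_ecc);
}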
+/* Correct the ECC on a 256 byte block of data */ + +int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc, + const unsigned char *test_ecc) +{ + unsigned char d0, d1, d2; /* deltas */ + + d0 = read_ecc[0] ^ test_ecc[0]; + d1 = read_ecc[1] ^ test_ecc[1]; + d2 = read_ecc[2] ^ test_ecc[2]; + + if ((d0 | d1 | d2) == 0) + return 0; /* no error */ + + if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 && + ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 && + ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) { + /* Single bit (recoverable) error in data */ + + unsigned byte; + unsigned bit; + +#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER + /* swap the bytes to correct for the wrong order */ + unsigned char t; + + t = d0; + d0 = d1; + d1 = t; +#endif + + bit = byte = 0; + + if (d1 & 0x80) + byte |= 0x80; + if (d1 & 0x20) + byte |= 0x40; + if (d1 & 0x08) + byte |= 0x20; + if (d1 & 0x02) + byte |= 0x10; + if (d0 & 0x80) + byte |= 0x08; + if (d0 & 0x20) + byte |= 0x04; + if (d0 & 0x08) + byte |= 0x02; + if (d0 & 0x02) + byte |= 0x01; + + if (d2 & 0x80) + bit |= 0x04; + if (d2 & 0x20) + bit |= 0x02; + if (d2 & 0x08) + bit |= 0x01; + + data[byte] ^= (1 << bit); + + return 1; /* Corrected the error */ + } + + if ((yaffs_CountBits(d0) + + yaffs_CountBits(d1) + + yaffs_CountBits(d2)) == 1) { + /* Reccoverable error in ecc */ + + read_ecc[0] = test_ecc[0]; + read_ecc[1] = test_ecc[1]; + read_ecc[2] = test_ecc[2]; + + return 1; /* Corrected the error */ + } + + /* Unrecoverable error */ + + return -1; + +} + + +/* + * ECCxxxOther does ECC calcs on arbitrary n bytes of data + */ +void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes, + yaffs_ECCOther *eccOther) +{ + unsigned int i; + + unsigned char col_parity = 0; + unsigned line_parity = 0; + unsigned line_parity_prime = 0; + unsigned char b; + + for (i = 0; i < nBytes; i++) { + b = column_parity_table[*data++]; + col_parity ^= b; + + if (b & 0x01) { + /* odd number of bits in the byte */ + line_parity ^= i; + line_parity_prime ^= ~i; + } + + } + + eccOther->colParity = (col_parity >> 2) & 0x3f; + eccOther->lineParity = line_parity; + eccOther->lineParityPrime = line_parity_prime; +} + +int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes, + yaffs_ECCOther *read_ecc, + const yaffs_ECCOther *test_ecc) +{ + unsigned char cDelta; /* column parity delta */ + unsigned lDelta; /* line parity delta */ + unsigned lDeltaPrime; /* line parity delta */ + unsigned bit; + + cDelta = read_ecc->colParity ^ test_ecc->colParity; + lDelta = read_ecc->lineParity ^ test_ecc->lineParity; + lDeltaPrime = read_ecc->lineParityPrime ^ test_ecc->lineParityPrime; + + if ((cDelta | lDelta | lDeltaPrime) == 0) + return 0; /* no error */ + + if (lDelta == ~lDeltaPrime && + (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15)) { + /* Single bit (recoverable) error in data */ + + bit = 0; + + if (cDelta & 0x20) + bit |= 0x04; + if (cDelta & 0x08) + bit |= 0x02; + if (cDelta & 0x02) + bit |= 0x01; + + if (lDelta >= nBytes) + return -1; + + data[lDelta] ^= (1 << bit); + + return 1; /* corrected */ + } + + if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) + + yaffs_CountBits(cDelta)) == 1) { + /* Reccoverable error in ecc */ + + *read_ecc = *test_ecc; + return 1; /* corrected */ + } + + /* Unrecoverable error */ + + return -1; +} diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h new file mode 100644 index 000000000000..a1ee69ac7f24 --- /dev/null +++ b/fs/yaffs2/yaffs_ecc.h @@ -0,0 +1,44 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. 
+ * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +/* + * This code implements the ECC algorithm used in SmartMedia. + * + * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. + * The two unused bit are set to 1. + * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC + * blocks are used on a 512-byte NAND page. + * + */ + +#ifndef __YAFFS_ECC_H__ +#define __YAFFS_ECC_H__ + +typedef struct { + unsigned char colParity; + unsigned lineParity; + unsigned lineParityPrime; +} yaffs_ECCOther; + +void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc); +int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc, + const unsigned char *test_ecc); + +void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes, + yaffs_ECCOther *ecc); +int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes, + yaffs_ECCOther *read_ecc, + const yaffs_ECCOther *test_ecc); +#endif diff --git a/fs/yaffs2/yaffs_fs.c b/fs/yaffs2/yaffs_fs.c new file mode 100644 index 000000000000..11eca48d064f --- /dev/null +++ b/fs/yaffs2/yaffs_fs.c @@ -0,0 +1,3279 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * Acknowledgements: + * Luc van OostenRyck for numerous patches. + * Nick Bane for numerous patches. + * Nick Bane for 2.5/2.6 integration. + * Andras Toth for mknod rdev issue. + * Michael Fischer for finding the problem with inode inconsistency. + * Some code bodily lifted from JFFS + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * + * This is the file system front-end to YAFFS that hooks it up to + * the VFS. + * + * Special notes: + * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with + * this superblock + * >> 2.6: sb->s_fs_info points to the yaffs_Device associated with this + * superblock + * >> inode->u.generic_ip points to the associated yaffs_Object. 
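+ * >> 2.6.19 and later: inode->i_private points to the associated
+ *    yaffs_Object (editor's note; see the yaffs_InodeToObjectLV() macro below).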
+ */ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)) +#define YAFFS_COMPILE_BACKGROUND +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) +#define YAFFS_COMPILE_EXPORTFS +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +#include <linux/config.h> +#endif + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/smp_lock.h> +#include <linux/pagemap.h> +#include <linux/mtd/mtd.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/ctype.h> + +#ifdef YAFFS_COMPILE_EXPORTFS +#include <linux/exportfs.h> +#endif + +#ifdef YAFFS_COMPILE_BACKGROUND +#include <linux/kthread.h> +#include <linux/delay.h> +#include <linux/freezer.h> +#endif + +#include <asm/div64.h> + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + +#include <linux/statfs.h> + +#define UnlockPage(p) unlock_page(p) +#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags) + +/* FIXME: use sb->s_id instead ? */ +#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf) + +#else + +#include <linux/locks.h> +#define BDEVNAME_SIZE 0 +#define yaffs_devname(sb, buf) kdevname(sb->s_dev) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)) +/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */ +#define __user +#endif + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +#define YPROC_ROOT (&proc_root) +#else +#define YPROC_ROOT NULL +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +#define WRITE_SIZE_STR "writesize" +#define WRITE_SIZE(mtd) ((mtd)->writesize) +#else +#define WRITE_SIZE_STR "oobblock" +#define WRITE_SIZE(mtd) ((mtd)->oobblock) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)) +#define YAFFS_USE_WRITE_BEGIN_END 1 +#else +#define YAFFS_USE_WRITE_BEGIN_END 0 +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)) +static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size) +{ + uint64_t result = partition_size; + do_div(result, block_size); + return (uint32_t)result; +} +#else +#define YCALCBLOCKS(s, b) ((s)/(b)) +#endif + +#include <linux/uaccess.h> +#include <linux/mtd/mtd.h> + +#include "yportenv.h" +#include "yaffs_trace.h" +#include "yaffs_guts.h" + +#include "yaffs_linux.h" + +#include "yaffs_mtdif.h" +#include "yaffs_mtdif1.h" +#include "yaffs_mtdif2.h" + +unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS; +unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS; +unsigned int yaffs_auto_checkpoint = 1; +unsigned int yaffs_gc_control = 1; + +/* Module Parameters */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +module_param(yaffs_traceMask, uint, 0644); +module_param(yaffs_wr_attempts, uint, 0644); +module_param(yaffs_auto_checkpoint, uint, 0644); +module_param(yaffs_gc_control, uint, 0644); +#else +MODULE_PARM(yaffs_traceMask, "i"); +MODULE_PARM(yaffs_wr_attempts, "i"); +MODULE_PARM(yaffs_auto_checkpoint, "i"); +MODULE_PARM(yaffs_gc_control, "i"); +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) +/* use iget and read_inode */ +#define Y_IGET(sb, inum) iget((sb), (inum)) +static void yaffs_read_inode(struct inode *inode); + +#else +/* Call local equivalent */ +#define YAFFS_USE_OWN_IGET +#define Y_IGET(sb, inum) yaffs_iget((sb), (inum)) + +static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino); +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) +#define yaffs_InodeToObjectLV(iptr) 
((iptr)->i_private) +#else +#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip) +#endif + +#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr))) +#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode) + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info) +#else +#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp) +#endif + + +#define update_dir_time(dir) do {\ + (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \ + } while(0) + +static void yaffs_put_super(struct super_block *sb); + +static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n, + loff_t *pos); +static ssize_t yaffs_hold_space(struct file *f); +static void yaffs_release_space(struct file *f); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_file_flush(struct file *file, fl_owner_t id); +#else +static int yaffs_file_flush(struct file *file); +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) +static int yaffs_sync_object(struct file *file, int datasync); +#else +static int yaffs_sync_object(struct file *file, struct dentry *dentry, + int datasync); +#endif + +static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *n); +static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *n); +#else +static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode); +static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry); +#endif +static int yaffs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry); +static int yaffs_unlink(struct inode *dir, struct dentry *dentry); +static int yaffs_symlink(struct inode *dir, struct dentry *dentry, + const char *symname); +static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + dev_t dev); +#else +static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + int dev); +#endif +static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); +static int yaffs_setattr(struct dentry *dentry, struct iattr *attr); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_sync_fs(struct super_block *sb, int wait); +static void yaffs_write_super(struct super_block *sb); +#else +static int yaffs_sync_fs(struct super_block *sb); +static int yaffs_write_super(struct super_block *sb); +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf); +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf); +#else +static int yaffs_statfs(struct super_block *sb, struct statfs *buf); +#endif + +#ifdef YAFFS_HAS_PUT_INODE +static void yaffs_put_inode(struct inode *inode); +#endif + +static void yaffs_delete_inode(struct inode *); +static void yaffs_clear_inode(struct inode *); + +static int yaffs_readpage(struct file *file, struct page *page); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_writepage(struct page *page, struct writeback_control *wbc); +#else +static int 
yaffs_writepage(struct page *page); +#endif + + +#if (YAFFS_USE_WRITE_BEGIN_END != 0) +static int yaffs_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +static int yaffs_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *pg, void *fsdadata); +#else +static int yaffs_prepare_write(struct file *f, struct page *pg, + unsigned offset, unsigned to); +static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset, + unsigned to); + +#endif + +static int yaffs_readlink(struct dentry *dentry, char __user *buffer, + int buflen); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) +static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd); +#else +static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd); +#endif +static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin); + +static struct address_space_operations yaffs_file_address_operations = { + .readpage = yaffs_readpage, + .writepage = yaffs_writepage, +#if (YAFFS_USE_WRITE_BEGIN_END > 0) + .write_begin = yaffs_write_begin, + .write_end = yaffs_write_end, +#else + .prepare_write = yaffs_prepare_write, + .commit_write = yaffs_commit_write, +#endif +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)) +static const struct file_operations yaffs_file_operations = { + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .mmap = generic_file_mmap, + .flush = yaffs_file_flush, + .fsync = yaffs_sync_object, + .splice_read = generic_file_splice_read, + .splice_write = generic_file_splice_write, + .llseek = generic_file_llseek, +}; + +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) + +static const struct file_operations yaffs_file_operations = { + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .mmap = generic_file_mmap, + .flush = yaffs_file_flush, + .fsync = yaffs_sync_object, + .sendfile = generic_file_sendfile, +}; + +#else + +static const struct file_operations yaffs_file_operations = { + .read = generic_file_read, + .write = generic_file_write, + .mmap = generic_file_mmap, + .flush = yaffs_file_flush, + .fsync = yaffs_sync_object, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + .sendfile = generic_file_sendfile, +#endif +}; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) +static void zero_user_segment(struct page *page, unsigned start, unsigned end) +{ + void * kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + start, 0, end - start); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page); +} +#endif + + +static const struct inode_operations yaffs_file_inode_operations = { + .setattr = yaffs_setattr, +}; + +static const struct inode_operations yaffs_symlink_inode_operations = { + .readlink = yaffs_readlink, + .follow_link = yaffs_follow_link, + .setattr = yaffs_setattr, +}; + +static const struct inode_operations yaffs_dir_inode_operations = { + .create = yaffs_create, + .lookup = yaffs_lookup, + .link = yaffs_link, + .unlink = yaffs_unlink, + .symlink = yaffs_symlink, + .mkdir = yaffs_mkdir, + .rmdir = yaffs_unlink, + .mknod = yaffs_mknod, + .rename = yaffs_rename, + .setattr = yaffs_setattr, +}; + +static const struct file_operations yaffs_dir_operations = { + .read = generic_read_dir, + .readdir = yaffs_readdir, + .fsync = 
yaffs_sync_object, + .llseek = yaffs_dir_llseek, +}; + +static const struct super_operations yaffs_super_ops = { + .statfs = yaffs_statfs, + +#ifndef YAFFS_USE_OWN_IGET + .read_inode = yaffs_read_inode, +#endif +#ifdef YAFFS_HAS_PUT_INODE + .put_inode = yaffs_put_inode, +#endif + .put_super = yaffs_put_super, + .delete_inode = yaffs_delete_inode, + .clear_inode = yaffs_clear_inode, + .sync_fs = yaffs_sync_fs, + .write_super = yaffs_write_super, +}; + +static unsigned yaffs_gc_control_callback(yaffs_Device *dev) +{ + return yaffs_gc_control; +} + +static void yaffs_GrossLock(yaffs_Device *dev) +{ + T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current)); + down(&(yaffs_DeviceToContext(dev)->grossLock)); + T(YAFFS_TRACE_LOCK, (TSTR("yaffs locked %p\n"), current)); +} + +static void yaffs_GrossUnlock(yaffs_Device *dev) +{ + T(YAFFS_TRACE_LOCK, (TSTR("yaffs unlocking %p\n"), current)); + up(&(yaffs_DeviceToContext(dev)->grossLock)); +} + +#ifdef YAFFS_COMPILE_EXPORTFS + +static struct inode * +yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino, uint32_t generation) +{ + return Y_IGET(sb, ino); +} + +static struct dentry * +yaffs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) +{ + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode) ; +} + +static struct dentry * + yaffs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) +{ + return generic_fh_to_parent(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode); +} + +struct dentry *yaffs2_get_parent(struct dentry *dentry) +{ + + struct super_block *sb = dentry->d_inode->i_sb; + struct dentry *parent = ERR_PTR(-ENOENT); + struct inode *inode; + unsigned long parent_ino; + yaffs_Object *d_obj; + yaffs_Object *parent_obj; + + d_obj = yaffs_InodeToObject(dentry->d_inode); + + if (d_obj) { + parent_obj = d_obj->parent; + if (parent_obj) { + parent_ino = yaffs_GetObjectInode(parent_obj); + inode = Y_IGET(sb, parent_ino); + + if (IS_ERR(inode)) { + parent = ERR_CAST(inode); + } else { + parent = d_obtain_alias(inode); + if (!IS_ERR(parent)) { + parent = ERR_PTR(-ENOMEM); + iput(inode); + } + } + } + } + + return parent; +} + +/* Just declare a zero structure as a NULL value implies + * using the default functions of exportfs. + */ + +static struct export_operations yaffs_export_ops = +{ + .fh_to_dentry = yaffs2_fh_to_dentry, + .fh_to_parent = yaffs2_fh_to_parent, + .get_parent = yaffs2_get_parent, +} ; + +#endif + +/*-----------------------------------------------------------------*/ +/* Directory search context allows us to unlock access to yaffs during + * filldir without causing problems with the directory being modified. + * This is similar to the tried and tested mechanism used in yaffs direct. + * + * A search context iterates along a doubly linked list of siblings in the + * directory. If the iterating object is deleted then this would corrupt + * the list iteration, likely causing a crash. The search context avoids + * this by using the removeObjectCallback to move the search context to the + * next object before the object is deleted. + * + * Many readdirs (and thus seach conexts) may be alive simulateously so + * each yaffs_Device has a list of these. + * + * A seach context lives for the duration of a readdir. + * + * All these functions must be called while yaffs is locked. 
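+ *
+ * (Editor's note: the per-device list mentioned above is the searchContexts
+ * list in the yaffs_LinuxContext; yaffs_RemoveObjectCallback() below walks
+ * it whenever an object is unlinked.)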
+ */ + +struct yaffs_SearchContext { + yaffs_Device *dev; + yaffs_Object *dirObj; + yaffs_Object *nextReturn; + struct ylist_head others; +}; + +/* + * yaffs_NewSearch() creates a new search context, initialises it and + * adds it to the device's search context list. + * + * Called at start of readdir. + */ +static struct yaffs_SearchContext * yaffs_NewSearch(yaffs_Object *dir) +{ + yaffs_Device *dev = dir->myDev; + struct yaffs_SearchContext *sc = YMALLOC(sizeof(struct yaffs_SearchContext)); + if(sc){ + sc->dirObj = dir; + sc->dev = dev; + if( ylist_empty(&sc->dirObj->variant.directoryVariant.children)) + sc->nextReturn = NULL; + else + sc->nextReturn = ylist_entry( + dir->variant.directoryVariant.children.next, + yaffs_Object,siblings); + YINIT_LIST_HEAD(&sc->others); + ylist_add(&sc->others,&(yaffs_DeviceToContext(dev)->searchContexts)); + } + return sc; +} + +/* + * yaffs_EndSearch() disposes of a search context and cleans up. + */ +static void yaffs_EndSearch(struct yaffs_SearchContext * sc) +{ + if(sc){ + ylist_del(&sc->others); + YFREE(sc); + } +} + +/* + * yaffs_SearchAdvance() moves a search context to the next object. + * Called when the search iterates or when an object removal causes + * the search context to be moved to the next object. + */ +static void yaffs_SearchAdvance(struct yaffs_SearchContext *sc) +{ + if(!sc) + return; + + if( sc->nextReturn == NULL || + ylist_empty(&sc->dirObj->variant.directoryVariant.children)) + sc->nextReturn = NULL; + else { + struct ylist_head *next = sc->nextReturn->siblings.next; + + if( next == &sc->dirObj->variant.directoryVariant.children) + sc->nextReturn = NULL; /* end of list */ + else + sc->nextReturn = ylist_entry(next,yaffs_Object,siblings); + } +} + +/* + * yaffs_RemoveObjectCallback() is called when an object is unlinked. + * We check open search contexts and advance any which are currently + * on the object being iterated. + */ +static void yaffs_RemoveObjectCallback(yaffs_Object *obj) +{ + + struct ylist_head *i; + struct yaffs_SearchContext *sc; + struct ylist_head *search_contexts = &(yaffs_DeviceToContext(obj->myDev)->searchContexts); + + + /* Iterate through the directory search contexts. + * If any are currently on the object being removed, then advance + * the search context to the next object to prevent a hanging pointer. 
+ */ + ylist_for_each(i, search_contexts) { + if (i) { + sc = ylist_entry(i, struct yaffs_SearchContext,others); + if(sc->nextReturn == obj) + yaffs_SearchAdvance(sc); + } + } + +} + + +/*-----------------------------------------------------------------*/ + +static int yaffs_readlink(struct dentry *dentry, char __user *buffer, + int buflen) +{ + unsigned char *alias; + int ret; + + yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev; + + yaffs_GrossLock(dev); + + alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry)); + + yaffs_GrossUnlock(dev); + + if (!alias) + return -ENOMEM; + + ret = vfs_readlink(dentry, buffer, buflen, alias); + kfree(alias); + return ret; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) +static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) +#else +static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd) +#endif +{ + unsigned char *alias; + int ret; + yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev; + + yaffs_GrossLock(dev); + + alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry)); + + yaffs_GrossUnlock(dev); + + if (!alias) { + ret = -ENOMEM; + goto out; + } + + ret = vfs_follow_link(nd, alias); + kfree(alias); +out: +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) + return ERR_PTR(ret); +#else + return ret; +#endif +} + +struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev, + yaffs_Object *obj); + +/* + * Lookup is used to find objects in the fs + */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + +static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *n) +#else +static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry) +#endif +{ + yaffs_Object *obj; + struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */ + + yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev; + + if(current != yaffs_DeviceToContext(dev)->readdirProcess) + yaffs_GrossLock(dev); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_lookup for %d:%s\n"), + yaffs_InodeToObject(dir)->objectId, dentry->d_name.name)); + + obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir), + dentry->d_name.name); + + obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */ + + /* Can't hold gross lock when calling yaffs_get_inode() */ + if(current != yaffs_DeviceToContext(dev)->readdirProcess) + yaffs_GrossUnlock(dev); + + if (obj) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_lookup found %d\n"), obj->objectId)); + + inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); + + if (inode) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_loookup dentry \n"))); +/* #if 0 asserted by NCB for 2.5/6 compatability - falls through to + * d_add even if NULL inode */ +#if 0 + /*dget(dentry); // try to solve directory bug */ + d_add(dentry, inode); + + /* return dentry; */ + return NULL; +#endif + } + + } else { + T(YAFFS_TRACE_OS,(TSTR("yaffs_lookup not found\n"))); + + } + +/* added NCB for 2.5/6 compatability - forces add even if inode is + * NULL which creates dentry hash */ + d_add(dentry, inode); + + return NULL; +} + + +#ifdef YAFFS_HAS_PUT_INODE + +/* For now put inode is just for debugging + * Put inode is called when the inode **structure** is put. 
+ */ +static void yaffs_put_inode(struct inode *inode) +{ + T(YAFFS_TRACE_OS, + (TSTR("yaffs_put_inode: ino %d, count %d\n"), (int)inode->i_ino, + atomic_read(&inode->i_count))); + +} +#endif + +/* clear is called to tell the fs to release any per-inode data it holds */ +static void yaffs_clear_inode(struct inode *inode) +{ + yaffs_Object *obj; + yaffs_Device *dev; + + obj = yaffs_InodeToObject(inode); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_clear_inode: ino %d, count %d %s\n"), (int)inode->i_ino, + atomic_read(&inode->i_count), + obj ? "object exists" : "null object")); + + if (obj) { + dev = obj->myDev; + yaffs_GrossLock(dev); + + /* Clear the association between the inode and + * the yaffs_Object. + */ + obj->myInode = NULL; + yaffs_InodeToObjectLV(inode) = NULL; + + /* If the object freeing was deferred, then the real + * free happens now. + * This should fix the inode inconsistency problem. + */ + + yaffs_HandleDeferedFree(obj); + + yaffs_GrossUnlock(dev); + } + +} + +/* delete is called when the link count is zero and the inode + * is put (ie. nobody wants to know about it anymore, time to + * delete the file). + * NB Must call clear_inode() + */ +static void yaffs_delete_inode(struct inode *inode) +{ + yaffs_Object *obj = yaffs_InodeToObject(inode); + yaffs_Device *dev; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_delete_inode: ino %d, count %d %s\n"), (int)inode->i_ino, + atomic_read(&inode->i_count), + obj ? "object exists" : "null object")); + + if (obj) { + dev = obj->myDev; + yaffs_GrossLock(dev); + yaffs_DeleteObject(obj); + yaffs_GrossUnlock(dev); + } +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) + truncate_inode_pages(&inode->i_data, 0); +#endif + clear_inode(inode); +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_file_flush(struct file *file, fl_owner_t id) +#else +static int yaffs_file_flush(struct file *file) +#endif +{ + yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry); + + yaffs_Device *dev = obj->myDev; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_file_flush object %d (%s)\n"), obj->objectId, + obj->dirty ? "dirty" : "clean")); + + yaffs_GrossLock(dev); + + yaffs_FlushFile(obj, 1, 0); + + yaffs_GrossUnlock(dev); + + return 0; +} + +static int yaffs_readpage_nolock(struct file *f, struct page *pg) +{ + /* Lifted from jffs2 */ + + yaffs_Object *obj; + unsigned char *pg_buf; + int ret; + + yaffs_Device *dev; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_readpage_nolock at %08x, size %08x\n"), + (unsigned)(pg->index << PAGE_CACHE_SHIFT), + (unsigned)PAGE_CACHE_SIZE)); + + obj = yaffs_DentryToObject(f->f_dentry); + + dev = obj->myDev; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + BUG_ON(!PageLocked(pg)); +#else + if (!PageLocked(pg)) + PAGE_BUG(pg); +#endif + + pg_buf = kmap(pg); + /* FIXME: Can kmap fail? 
*/ + + yaffs_GrossLock(dev); + + ret = yaffs_ReadDataFromFile(obj, pg_buf, + pg->index << PAGE_CACHE_SHIFT, + PAGE_CACHE_SIZE); + + yaffs_GrossUnlock(dev); + + if (ret >= 0) + ret = 0; + + if (ret) { + ClearPageUptodate(pg); + SetPageError(pg); + } else { + SetPageUptodate(pg); + ClearPageError(pg); + } + + flush_dcache_page(pg); + kunmap(pg); + + T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage_nolock done\n"))); + return ret; +} + +static int yaffs_readpage_unlock(struct file *f, struct page *pg) +{ + int ret = yaffs_readpage_nolock(f, pg); + UnlockPage(pg); + return ret; +} + +static int yaffs_readpage(struct file *f, struct page *pg) +{ + int ret; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage\n"))); + ret=yaffs_readpage_unlock(f, pg); + T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage done\n"))); + return ret; +} + +/* writepage inspired by/stolen from smbfs */ + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_writepage(struct page *page, struct writeback_control *wbc) +#else +static int yaffs_writepage(struct page *page) +#endif +{ + struct address_space *mapping = page->mapping; + struct inode *inode; + unsigned long end_index; + char *buffer; + yaffs_Object *obj; + int nWritten = 0; + unsigned nBytes; + loff_t i_size; + + if (!mapping) + BUG(); + inode = mapping->host; + if (!inode) + BUG(); + i_size = i_size_read(inode); + + end_index = i_size >> PAGE_CACHE_SHIFT; + + if(page->index < end_index) + nBytes = PAGE_CACHE_SIZE; + else { + nBytes = i_size & (PAGE_CACHE_SIZE -1); + + if (page->index > end_index || !nBytes) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_writepage at %08x, inode size = %08x!!!\n"), + (unsigned)(page->index << PAGE_CACHE_SHIFT), + (unsigned)inode->i_size)); + T(YAFFS_TRACE_OS, + (TSTR(" -> don't care!!\n"))); + + zero_user_segment(page,0,PAGE_CACHE_SIZE); + set_page_writeback(page); + unlock_page(page); + end_page_writeback(page); + return 0; + } + } + + if(nBytes != PAGE_CACHE_SIZE) + zero_user_segment(page,nBytes,PAGE_CACHE_SIZE); + + get_page(page); + + buffer = kmap(page); + + obj = yaffs_InodeToObject(inode); + yaffs_GrossLock(obj->myDev); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_writepage at %08x, size %08x\n"), + (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes)); + T(YAFFS_TRACE_OS, + (TSTR("writepag0: obj = %05x, ino = %05x\n"), + (int)obj->variant.fileVariant.fileSize, (int)inode->i_size)); + + nWritten = yaffs_WriteDataToFile(obj, buffer, + page->index << PAGE_CACHE_SHIFT, nBytes, 0); + + T(YAFFS_TRACE_OS, + (TSTR("writepag1: obj = %05x, ino = %05x\n"), + (int)obj->variant.fileVariant.fileSize, (int)inode->i_size)); + + yaffs_GrossUnlock(obj->myDev); + + kunmap(page); + set_page_writeback(page); + unlock_page(page); + end_page_writeback(page); + put_page(page); + + return (nWritten == nBytes) ? 0 : -ENOSPC; +} + + +#if (YAFFS_USE_WRITE_BEGIN_END > 0) +static int yaffs_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + struct page *pg = NULL; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + + int ret = 0; + int space_held = 0; + + /* Get a page */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + pg = grab_cache_page_write_begin(mapping, index, flags); +#else + pg = __grab_cache_page(mapping, index); +#endif + + *pagep = pg; + if (!pg) { + ret = -ENOMEM; + goto out; + } + T(YAFFS_TRACE_OS, + (TSTR("start yaffs_write_begin index %d(%x) uptodate %d\n"), + (int)index,(int)index,Page_Uptodate(pg) ? 
1 : 0)); + + /* Get fs space */ + space_held = yaffs_hold_space(filp); + + if (!space_held) { + ret = -ENOSPC; + goto out; + } + + /* Update page if required */ + + if (!Page_Uptodate(pg)) + ret = yaffs_readpage_nolock(filp, pg); + + if (ret) + goto out; + + /* Happy path return */ + T(YAFFS_TRACE_OS, (TSTR("end yaffs_write_begin - ok\n"))); + + return 0; + +out: + T(YAFFS_TRACE_OS, + (TSTR("end yaffs_write_begin fail returning %d\n"), ret)); + if (space_held) + yaffs_release_space(filp); + if (pg) { + unlock_page(pg); + page_cache_release(pg); + } + return ret; +} + +#else + +static int yaffs_prepare_write(struct file *f, struct page *pg, + unsigned offset, unsigned to) +{ + T(YAFFS_TRACE_OS, (TSTR("yaffs_prepair_write\n"))); + + if (!Page_Uptodate(pg)) + return yaffs_readpage_nolock(f, pg); + return 0; +} +#endif + +#if (YAFFS_USE_WRITE_BEGIN_END > 0) +static int yaffs_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *pg, void *fsdadata) +{ + int ret = 0; + void *addr, *kva; + uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1); + + kva = kmap(pg); + addr = kva + offset_into_page; + + T(YAFFS_TRACE_OS, + ("yaffs_write_end addr %p pos %x nBytes %d\n", + addr,(unsigned)pos, copied)); + + ret = yaffs_file_write(filp, addr, copied, &pos); + + if (ret != copied) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_write_end not same size ret %d copied %d\n"), + ret, copied)); + SetPageError(pg); + } else { + /* Nothing */ + } + + kunmap(pg); + + yaffs_release_space(filp); + unlock_page(pg); + page_cache_release(pg); + return ret; +} +#else + +static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset, + unsigned to) +{ + void *addr, *kva; + + loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset; + int nBytes = to - offset; + int nWritten; + + unsigned spos = pos; + unsigned saddr; + + kva = kmap(pg); + addr = kva + offset; + + saddr = (unsigned) addr; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_commit_write addr %x pos %x nBytes %d\n"), + saddr, spos, nBytes)); + + nWritten = yaffs_file_write(f, addr, nBytes, &pos); + + if (nWritten != nBytes) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_commit_write not same size nWritten %d nBytes %d\n"), + nWritten, nBytes)); + SetPageError(pg); + } else { + /* Nothing */ + } + + kunmap(pg); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_commit_write returning %d\n"), + nWritten == nBytes ? 0 : nWritten)); + + return nWritten == nBytes ? 0 : nWritten; +} +#endif + + +static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj) +{ + if (inode && obj) { + + + /* Check mode against the variant type and attempt to repair if broken. */ + __u32 mode = obj->yst_mode; + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + if (!S_ISREG(mode)) { + obj->yst_mode &= ~S_IFMT; + obj->yst_mode |= S_IFREG; + } + + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + if (!S_ISLNK(mode)) { + obj->yst_mode &= ~S_IFMT; + obj->yst_mode |= S_IFLNK; + } + + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + if (!S_ISDIR(mode)) { + obj->yst_mode &= ~S_IFMT; + obj->yst_mode |= S_IFDIR; + } + + break; + case YAFFS_OBJECT_TYPE_UNKNOWN: + case YAFFS_OBJECT_TYPE_HARDLINK: + case YAFFS_OBJECT_TYPE_SPECIAL: + default: + /* TODO? 
*/ + break; + } + + inode->i_flags |= S_NOATIME; + + inode->i_ino = obj->objectId; + inode->i_mode = obj->yst_mode; + inode->i_uid = obj->yst_uid; + inode->i_gid = obj->yst_gid; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) + inode->i_blksize = inode->i_sb->s_blocksize; +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + + inode->i_rdev = old_decode_dev(obj->yst_rdev); + inode->i_atime.tv_sec = (time_t) (obj->yst_atime); + inode->i_atime.tv_nsec = 0; + inode->i_mtime.tv_sec = (time_t) obj->yst_mtime; + inode->i_mtime.tv_nsec = 0; + inode->i_ctime.tv_sec = (time_t) obj->yst_ctime; + inode->i_ctime.tv_nsec = 0; +#else + inode->i_rdev = obj->yst_rdev; + inode->i_atime = obj->yst_atime; + inode->i_mtime = obj->yst_mtime; + inode->i_ctime = obj->yst_ctime; +#endif + inode->i_size = yaffs_GetObjectFileLength(obj); + inode->i_blocks = (inode->i_size + 511) >> 9; + + inode->i_nlink = yaffs_GetObjectLinkCount(obj); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n"), + inode->i_mode, inode->i_uid, inode->i_gid, + (int)inode->i_size, atomic_read(&inode->i_count))); + + switch (obj->yst_mode & S_IFMT) { + default: /* fifo, device or socket */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + init_special_inode(inode, obj->yst_mode, + old_decode_dev(obj->yst_rdev)); +#else + init_special_inode(inode, obj->yst_mode, + (dev_t) (obj->yst_rdev)); +#endif + break; + case S_IFREG: /* file */ + inode->i_op = &yaffs_file_inode_operations; + inode->i_fop = &yaffs_file_operations; + inode->i_mapping->a_ops = + &yaffs_file_address_operations; + break; + case S_IFDIR: /* directory */ + inode->i_op = &yaffs_dir_inode_operations; + inode->i_fop = &yaffs_dir_operations; + break; + case S_IFLNK: /* symlink */ + inode->i_op = &yaffs_symlink_inode_operations; + break; + } + + yaffs_InodeToObjectLV(inode) = obj; + + obj->myInode = inode; + + } else { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_FileInode invalid parameters\n"))); + } + +} + +struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev, + yaffs_Object *obj) +{ + struct inode *inode; + + if (!sb) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_get_inode for NULL super_block!!\n"))); + return NULL; + + } + + if (!obj) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_get_inode for NULL object!!\n"))); + return NULL; + + } + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_get_inode for object %d\n"), obj->objectId)); + + inode = Y_IGET(sb, obj->objectId); + if (IS_ERR(inode)) + return NULL; + + /* NB Side effect: iget calls back to yaffs_read_inode(). */ + /* iget also increments the inode's i_count */ + /* NB You can't be holding grossLock or deadlock will happen! 
*/ + + return inode; +} + +static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n, + loff_t *pos) +{ + yaffs_Object *obj; + int nWritten, ipos; + struct inode *inode; + yaffs_Device *dev; + + obj = yaffs_DentryToObject(f->f_dentry); + + dev = obj->myDev; + + yaffs_GrossLock(dev); + + inode = f->f_dentry->d_inode; + + if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) + ipos = inode->i_size; + else + ipos = *pos; + + if (!obj) + T(YAFFS_TRACE_OS, + (TSTR("yaffs_file_write: hey obj is null!\n"))); + else + T(YAFFS_TRACE_OS, + (TSTR("yaffs_file_write about to write writing %u(%x) bytes" + "to object %d at %d(%x)\n"), + (unsigned) n, (unsigned) n, obj->objectId, ipos,ipos)); + + nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_file_write: %d(%x) bytes written\n"), + (unsigned )n,(unsigned)n)); + + if (nWritten > 0) { + ipos += nWritten; + *pos = ipos; + if (ipos > inode->i_size) { + inode->i_size = ipos; + inode->i_blocks = (ipos + 511) >> 9; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_file_write size updated to %d bytes, " + "%d blocks\n"), + ipos, (int)(inode->i_blocks))); + } + + } + yaffs_GrossUnlock(dev); + return (nWritten == 0) && (n > 0) ? -ENOSPC : nWritten; +} + +/* Space holding and freeing is done to ensure we have space available for write_begin/end */ +/* For now we just assume few parallel writes and check against a small number. */ +/* Todo: need to do this with a counter to handle parallel reads better */ + +static ssize_t yaffs_hold_space(struct file *f) +{ + yaffs_Object *obj; + yaffs_Device *dev; + + int nFreeChunks; + + + obj = yaffs_DentryToObject(f->f_dentry); + + dev = obj->myDev; + + yaffs_GrossLock(dev); + + nFreeChunks = yaffs_GetNumberOfFreeChunks(dev); + + yaffs_GrossUnlock(dev); + + return (nFreeChunks > 20) ? 1 : 0; +} + +static void yaffs_release_space(struct file *f) +{ + yaffs_Object *obj; + yaffs_Device *dev; + + + obj = yaffs_DentryToObject(f->f_dentry); + + dev = obj->myDev; + + yaffs_GrossLock(dev); + + + yaffs_GrossUnlock(dev); +} + + +static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin) +{ + long long retval; + + lock_kernel(); + + switch (origin){ + case 2: + offset += i_size_read(file->f_path.dentry->d_inode); + break; + case 1: + offset += file->f_pos; + } + retval = -EINVAL; + + if (offset >= 0){ + if (offset != file->f_pos) + file->f_pos = offset; + + retval = offset; + } + unlock_kernel(); + return retval; +} + + +static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir) +{ + yaffs_Object *obj; + yaffs_Device *dev; + struct yaffs_SearchContext *sc; + struct inode *inode = f->f_dentry->d_inode; + unsigned long offset, curoffs; + yaffs_Object *l; + int retVal = 0; + + char name[YAFFS_MAX_NAME_LENGTH + 1]; + + obj = yaffs_DentryToObject(f->f_dentry); + dev = obj->myDev; + + yaffs_GrossLock(dev); + + yaffs_DeviceToContext(dev)->readdirProcess = current; + + offset = f->f_pos; + + sc = yaffs_NewSearch(obj); + if(!sc){ + retVal = -ENOMEM; + goto unlock_out; + } + + T(YAFFS_TRACE_OS, (TSTR("yaffs_readdir: starting at %d\n"), (int)offset)); + + if (offset == 0) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_readdir: entry . ino %d \n"), + (int)inode->i_ino)); + yaffs_GrossUnlock(dev); + if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) + goto out; + yaffs_GrossLock(dev); + offset++; + f->f_pos++; + } + if (offset == 1) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_readdir: entry .. 
ino %d \n"), + (int)f->f_dentry->d_parent->d_inode->i_ino)); + yaffs_GrossUnlock(dev); + if (filldir(dirent, "..", 2, offset, + f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) + goto out; + yaffs_GrossLock(dev); + offset++; + f->f_pos++; + } + + curoffs = 1; + + /* If the directory has changed since the open or last call to + readdir, rewind to after the 2 canned entries. */ + if (f->f_version != inode->i_version) { + offset = 2; + f->f_pos = offset; + f->f_version = inode->i_version; + } + + while(sc->nextReturn){ + curoffs++; + l = sc->nextReturn; + if (curoffs >= offset) { + int this_inode = yaffs_GetObjectInode(l); + int this_type = yaffs_GetObjectType(l); + + yaffs_GetObjectName(l, name, + YAFFS_MAX_NAME_LENGTH + 1); + T(YAFFS_TRACE_OS, + (TSTR("yaffs_readdir: %s inode %d\n"), + name, yaffs_GetObjectInode(l))); + + yaffs_GrossUnlock(dev); + + if (filldir(dirent, + name, + strlen(name), + offset, + this_inode, + this_type) < 0) + goto out; + + yaffs_GrossLock(dev); + + offset++; + f->f_pos++; + } + yaffs_SearchAdvance(sc); + } + +unlock_out: + yaffs_DeviceToContext(dev)->readdirProcess = NULL; + + yaffs_GrossUnlock(dev); +out: + yaffs_EndSearch(sc); + + return retVal; +} + + + +/* + * File creation. Allocate an inode, and we're done.. + */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) +#define YCRED(x) x +#else +#define YCRED(x) (x->cred) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + dev_t rdev) +#else +static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + int rdev) +#endif +{ + struct inode *inode; + + yaffs_Object *obj = NULL; + yaffs_Device *dev; + + yaffs_Object *parent = yaffs_InodeToObject(dir); + + int error = -ENOSPC; + uid_t uid = YCRED(current)->fsuid; + gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid; + + if ((dir->i_mode & S_ISGID) && S_ISDIR(mode)) + mode |= S_ISGID; + + if (parent) { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_mknod: parent object %d type %d\n"), + parent->objectId, parent->variantType)); + } else { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_mknod: could not get parent object\n"))); + return -EPERM; + } + + T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making oject for %s, " + "mode %x dev %x\n"), + dentry->d_name.name, mode, rdev)); + + dev = parent->myDev; + + yaffs_GrossLock(dev); + + switch (mode & S_IFMT) { + default: + /* Special (socket, fifo, device...) */ + T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making special\n"))); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid, + gid, old_encode_dev(rdev)); +#else + obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid, + gid, rdev); +#endif + break; + case S_IFREG: /* file */ + T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making file\n"))); + obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid, + gid); + break; + case S_IFDIR: /* directory */ + T(YAFFS_TRACE_OS, + (TSTR("yaffs_mknod: making directory\n"))); + obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode, + uid, gid); + break; + case S_IFLNK: /* symlink */ + T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making symlink\n"))); + obj = NULL; /* Do we ever get here? 
*/ + break; + } + + /* Can not call yaffs_get_inode() with gross lock held */ + yaffs_GrossUnlock(dev); + + if (obj) { + inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj); + d_instantiate(dentry, inode); + update_dir_time(dir); + T(YAFFS_TRACE_OS, + (TSTR("yaffs_mknod created object %d count = %d\n"), + obj->objectId, atomic_read(&inode->i_count))); + error = 0; + yaffs_FillInodeFromObject(dir,parent); + } else { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_mknod failed making object\n"))); + error = -ENOMEM; + } + + return error; +} + +static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + int retVal; + T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n"))); + retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0); + return retVal; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *n) +#else +static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode) +#endif +{ + T(YAFFS_TRACE_OS,(TSTR("yaffs_create\n"))); + return yaffs_mknod(dir, dentry, mode | S_IFREG, 0); +} + +static int yaffs_unlink(struct inode *dir, struct dentry *dentry) +{ + int retVal; + + yaffs_Device *dev; + yaffs_Object *obj; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_unlink %d:%s\n"), + (int)(dir->i_ino), + dentry->d_name.name)); + obj = yaffs_InodeToObject(dir); + dev = obj->myDev; + + yaffs_GrossLock(dev); + + retVal = yaffs_Unlink(obj, dentry->d_name.name); + + if (retVal == YAFFS_OK) { + dentry->d_inode->i_nlink--; + dir->i_version++; + yaffs_GrossUnlock(dev); + mark_inode_dirty(dentry->d_inode); + update_dir_time(dir); + return 0; + } + yaffs_GrossUnlock(dev); + return -ENOTEMPTY; +} + +/* + * Create a link... + */ +static int yaffs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode = old_dentry->d_inode; + yaffs_Object *obj = NULL; + yaffs_Object *link = NULL; + yaffs_Device *dev; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_link\n"))); + + obj = yaffs_InodeToObject(inode); + dev = obj->myDev; + + yaffs_GrossLock(dev); + + if (!S_ISDIR(inode->i_mode)) /* Don't link directories */ + link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name, + obj); + + if (link) { + old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj); + d_instantiate(dentry, old_dentry->d_inode); + atomic_inc(&old_dentry->d_inode->i_count); + T(YAFFS_TRACE_OS, + (TSTR("yaffs_link link count %d i_count %d\n"), + old_dentry->d_inode->i_nlink, + atomic_read(&old_dentry->d_inode->i_count))); + } + + yaffs_GrossUnlock(dev); + + if (link){ + update_dir_time(dir); + return 0; + } + + return -EPERM; +} + +static int yaffs_symlink(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + yaffs_Object *obj; + yaffs_Device *dev; + uid_t uid = YCRED(current)->fsuid; + gid_t gid = (dir->i_mode & S_ISGID) ? 
dir->i_gid : YCRED(current)->fsgid; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_symlink\n"))); + + dev = yaffs_InodeToObject(dir)->myDev; + yaffs_GrossLock(dev); + obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name, + S_IFLNK | S_IRWXUGO, uid, gid, symname); + yaffs_GrossUnlock(dev); + + if (obj) { + struct inode *inode; + + inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj); + d_instantiate(dentry, inode); + update_dir_time(dir); + T(YAFFS_TRACE_OS, (TSTR("symlink created OK\n"))); + return 0; + } else { + T(YAFFS_TRACE_OS, (TSTR("symlink not created\n"))); + } + + return -ENOMEM; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) +static int yaffs_sync_object(struct file *file, int datasync) +#else +static int yaffs_sync_object(struct file *file, struct dentry *dentry, + int datasync) +#endif +{ + + yaffs_Object *obj; + yaffs_Device *dev; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) + struct dentry *dentry = file->f_path.dentry; +#endif + + obj = yaffs_DentryToObject(dentry); + + dev = obj->myDev; + + T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, + (TSTR("yaffs_sync_object\n"))); + yaffs_GrossLock(dev); + yaffs_FlushFile(obj, 1, datasync); + yaffs_GrossUnlock(dev); + return 0; +} + +/* + * The VFS layer already does all the dentry stuff for rename. + * + * NB: POSIX says you can rename an object over an old object of the same name + */ +static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + yaffs_Device *dev; + int retVal = YAFFS_FAIL; + yaffs_Object *target; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_rename\n"))); + dev = yaffs_InodeToObject(old_dir)->myDev; + + yaffs_GrossLock(dev); + + /* Check if the target is an existing directory that is not empty. */ + target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir), + new_dentry->d_name.name); + + + + if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY && + !ylist_empty(&target->variant.directoryVariant.children)) { + + T(YAFFS_TRACE_OS, (TSTR("target is non-empty dir\n"))); + + retVal = YAFFS_FAIL; + } else { + /* Now does unlinking internally using shadowing mechanism */ + T(YAFFS_TRACE_OS, (TSTR("calling yaffs_RenameObject\n"))); + + retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir), + old_dentry->d_name.name, + yaffs_InodeToObject(new_dir), + new_dentry->d_name.name); + } + yaffs_GrossUnlock(dev); + + if (retVal == YAFFS_OK) { + if (target) { + new_dentry->d_inode->i_nlink--; + mark_inode_dirty(new_dentry->d_inode); + } + + update_dir_time(old_dir); + if(old_dir != new_dir) + update_dir_time(new_dir); + return 0; + } else { + return -ENOTEMPTY; + } +} + +static int yaffs_setattr(struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + int error = 0; + yaffs_Device *dev; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_setattr of object %d\n"), + yaffs_InodeToObject(inode)->objectId)); + + /* Fail if a requested resize >= 2GB */ + if (attr->ia_valid & ATTR_SIZE && + (attr->ia_size >> 31)) + error = -EINVAL; + + if (error == 0) + error = inode_change_ok(inode, attr); + if (error == 0) { + int result; + if (!error){ + error = inode_setattr(inode, attr); + T(YAFFS_TRACE_OS,(TSTR("inode_setattr called\n"))); + if (attr->ia_valid & ATTR_SIZE) + truncate_inode_pages(&inode->i_data,attr->ia_size); + } + dev = yaffs_InodeToObject(inode)->myDev; + if (attr->ia_valid & ATTR_SIZE){ + T(YAFFS_TRACE_OS,(TSTR("resize to %d(%x)\n"), + (int)(attr->ia_size),(int)(attr->ia_size))); + } + yaffs_GrossLock(dev); + result 
= yaffs_SetAttributes(yaffs_InodeToObject(inode), attr); + if(result == YAFFS_OK) { + error = 0; + } else { + error = -EPERM; + } + yaffs_GrossUnlock(dev); + + } + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_setattr done returning %d\n"),error)); + + return error; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev; + struct super_block *sb = dentry->d_sb; +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf) +{ + yaffs_Device *dev = yaffs_SuperToDevice(sb); +#else +static int yaffs_statfs(struct super_block *sb, struct statfs *buf) +{ + yaffs_Device *dev = yaffs_SuperToDevice(sb); +#endif + + T(YAFFS_TRACE_OS, (TSTR("yaffs_statfs\n"))); + + yaffs_GrossLock(dev); + + buf->f_type = YAFFS_MAGIC; + buf->f_bsize = sb->s_blocksize; + buf->f_namelen = 255; + + if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) { + /* Do this if chunk size is not a power of 2 */ + + uint64_t bytesInDev; + uint64_t bytesFree; + + bytesInDev = ((uint64_t)((dev->param.endBlock - dev->param.startBlock + 1))) * + ((uint64_t)(dev->param.nChunksPerBlock * dev->nDataBytesPerChunk)); + + do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */ + buf->f_blocks = bytesInDev; + + bytesFree = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) * + ((uint64_t)(dev->nDataBytesPerChunk)); + + do_div(bytesFree, sb->s_blocksize); + + buf->f_bfree = bytesFree; + + } else if (sb->s_blocksize > dev->nDataBytesPerChunk) { + + buf->f_blocks = + (dev->param.endBlock - dev->param.startBlock + 1) * + dev->param.nChunksPerBlock / + (sb->s_blocksize / dev->nDataBytesPerChunk); + buf->f_bfree = + yaffs_GetNumberOfFreeChunks(dev) / + (sb->s_blocksize / dev->nDataBytesPerChunk); + } else { + buf->f_blocks = + (dev->param.endBlock - dev->param.startBlock + 1) * + dev->param.nChunksPerBlock * + (dev->nDataBytesPerChunk / sb->s_blocksize); + + buf->f_bfree = + yaffs_GetNumberOfFreeChunks(dev) * + (dev->nDataBytesPerChunk / sb->s_blocksize); + } + + buf->f_files = 0; + buf->f_ffree = 0; + buf->f_bavail = buf->f_bfree; + + yaffs_GrossUnlock(dev); + return 0; +} + + + +static void yaffs_FlushInodes(struct super_block *sb) +{ + struct inode *iptr; + yaffs_Object *obj; + + list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){ + obj = yaffs_InodeToObject(iptr); + if(obj){ + T(YAFFS_TRACE_OS, (TSTR("flushing obj %d\n"), + obj->objectId)); + yaffs_FlushFile(obj,1,0); + } + } +} + + +static void yaffs_FlushSuperBlock(struct super_block *sb, int do_checkpoint) +{ + yaffs_Device *dev = yaffs_SuperToDevice(sb); + if(!dev) + return; + + yaffs_FlushInodes(sb); + yaffs_UpdateDirtyDirectories(dev); + yaffs_FlushEntireDeviceCache(dev); + if(do_checkpoint) + yaffs_CheckpointSave(dev); +} + + +static unsigned yaffs_bg_gc_urgency(yaffs_Device *dev) +{ + unsigned erasedChunks = dev->nErasedBlocks * dev->param.nChunksPerBlock; + struct yaffs_LinuxContext *context = yaffs_DeviceToContext(dev); + unsigned scatteredFree = 0; /* Free chunks not in an erased block */ + + if(erasedChunks < dev->nFreeChunks) + scatteredFree = (dev->nFreeChunks - erasedChunks); + + if(!context->bgRunning) + return 0; + else if(scatteredFree < (dev->param.nChunksPerBlock * 2)) + return 0; + else if(erasedChunks > dev->nFreeChunks/2) + return 0; + else if(erasedChunks > dev->nFreeChunks/4) + return 1; + else + return 2; +} + +static int yaffs_do_sync_fs(struct super_block 
*sb, + int request_checkpoint) +{ + + yaffs_Device *dev = yaffs_SuperToDevice(sb); + unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4); + unsigned gc_urgent = yaffs_bg_gc_urgency(dev); + int do_checkpoint; + + T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs_do_sync_fs: gc-urgency %d %s %s%s\n"), + gc_urgent, + sb->s_dirt ? "dirty" : "clean", + request_checkpoint ? "checkpoint requested" : "no checkpoint", + oneshot_checkpoint ? " one-shot" : "" )); + + yaffs_GrossLock(dev); + do_checkpoint = ((request_checkpoint && !gc_urgent) || + oneshot_checkpoint) && + !dev->isCheckpointed; + + if (sb->s_dirt || do_checkpoint) { + yaffs_FlushSuperBlock(sb, !dev->isCheckpointed && do_checkpoint); + sb->s_dirt = 0; + if(oneshot_checkpoint) + yaffs_auto_checkpoint &= ~4; + } + yaffs_GrossUnlock(dev); + + return 0; +} + +/* + * yaffs background thread functions . + * yaffs_BackgroundThread() the thread function + * yaffs_BackgroundStart() launches the background thread. + * yaffs_BackgroundStop() cleans up the background thread. + * + * NB: + * The thread should only run after the yaffs is initialised + * The thread should be stopped before yaffs is unmounted. + * The thread should not do any writing while the fs is in read only. + */ + +#ifdef YAFFS_COMPILE_BACKGROUND + +void yaffs_background_waker(unsigned long data) +{ + wake_up_process((struct task_struct *)data); +} + +static int yaffs_BackgroundThread(void *data) +{ + yaffs_Device *dev = (yaffs_Device *)data; + struct yaffs_LinuxContext *context = yaffs_DeviceToContext(dev); + unsigned long now = jiffies; + unsigned long next_dir_update = now; + unsigned long next_gc = now; + unsigned long expires; + unsigned int urgency; + + int gcResult; + struct timer_list timer; + + T(YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs_background starting for dev %p\n"), + (void *)dev)); + + set_freezable(); + + while(context->bgRunning){ + T(YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs_background\n"))); + + if(kthread_should_stop()) + break; + + if(try_to_freeze()) + continue; + + yaffs_GrossLock(dev); + + now = jiffies; + + if(time_after(now, next_dir_update)){ + yaffs_UpdateDirtyDirectories(dev); + next_dir_update = now + HZ; + } + + if(time_after(now,next_gc)){ + if(!dev->isCheckpointed){ + urgency = yaffs_bg_gc_urgency(dev); + gcResult = yaffs_BackgroundGarbageCollect(dev, urgency); + if(urgency > 1) + next_gc = now + HZ/20+1; + else if(urgency > 0) + next_gc = now + HZ/10+1; + else + next_gc = now + HZ * 2; + } else /* + * gc not running so set to next_dir_update + * to cut down on wake ups + */ + next_gc = next_dir_update; + } + yaffs_GrossUnlock(dev); +#if 1 + expires = next_dir_update; + if (time_before(next_gc,expires)) + expires = next_gc; + if(time_before(expires,now)) + expires = now + HZ; + + init_timer_on_stack(&timer); + timer.expires = expires+1; + timer.data = (unsigned long) current; + timer.function = yaffs_background_waker; + + set_current_state(TASK_INTERRUPTIBLE); + add_timer(&timer); + schedule(); + del_timer_sync(&timer); +#else + msleep(10); +#endif + } + + return 0; +} + +static int yaffs_BackgroundStart(yaffs_Device *dev) +{ + int retval = 0; + struct yaffs_LinuxContext *context = yaffs_DeviceToContext(dev); + + context->bgRunning = 1; + + context->bgThread = kthread_run(yaffs_BackgroundThread, + (void *)dev,"yaffs-bg"); + + if(IS_ERR(context->bgThread)){ + retval = PTR_ERR(context->bgThread); + context->bgThread = NULL; + context->bgRunning = 0; + } + return retval; +} + +static void 
yaffs_BackgroundStop(yaffs_Device *dev) +{ + struct yaffs_LinuxContext *ctxt = yaffs_DeviceToContext(dev); + + ctxt->bgRunning = 0; + + if( ctxt->bgThread){ + kthread_stop(ctxt->bgThread); + ctxt->bgThread = NULL; + } +} +#else +static int yaffs_BackgroundThread(void *data) +{ + return 0; +} + +static int yaffs_BackgroundStart(yaffs_Device *dev) +{ + return 0; +} + +static void yaffs_BackgroundStop(yaffs_Device *dev) +{ +} +#endif + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static void yaffs_write_super(struct super_block *sb) +#else +static int yaffs_write_super(struct super_block *sb) +#endif +{ + unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2); + + T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs_write_super%s\n"), + request_checkpoint ? " checkpt" : "")); + + yaffs_do_sync_fs(sb, request_checkpoint); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) + return 0; +#endif +} + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_sync_fs(struct super_block *sb, int wait) +#else +static int yaffs_sync_fs(struct super_block *sb) +#endif +{ + unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1); + + T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, + (TSTR("yaffs_sync_fs%s\n"), + request_checkpoint ? " checkpt" : "")); + + yaffs_do_sync_fs(sb, request_checkpoint); + + return 0; +} + +#ifdef YAFFS_USE_OWN_IGET + +static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino) +{ + struct inode *inode; + yaffs_Object *obj; + yaffs_Device *dev = yaffs_SuperToDevice(sb); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_iget for %lu\n"), ino)); + + inode = iget_locked(sb, ino); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + + /* NB This is called as a side effect of other functions, but + * we had to release the lock to prevent deadlocks, so + * need to lock again. + */ + + yaffs_GrossLock(dev); + + obj = yaffs_FindObjectByNumber(dev, inode->i_ino); + + yaffs_FillInodeFromObject(inode, obj); + + yaffs_GrossUnlock(dev); + + unlock_new_inode(inode); + return inode; +} + +#else + +static void yaffs_read_inode(struct inode *inode) +{ + /* NB This is called as a side effect of other functions, but + * we had to release the lock to prevent deadlocks, so + * need to lock again. 
+ */ + + yaffs_Object *obj; + yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_read_inode for %d\n"), (int)inode->i_ino)); + + if(current != yaffs_DeviceToContext(dev)->readdirProcess) + yaffs_GrossLock(dev); + + obj = yaffs_FindObjectByNumber(dev, inode->i_ino); + + yaffs_FillInodeFromObject(inode, obj); + + if(current != yaffs_DeviceToContext(dev)->readdirProcess) + yaffs_GrossUnlock(dev); +} + +#endif + +static YLIST_HEAD(yaffs_context_list); +struct semaphore yaffs_context_lock; + +#if 0 /* not used */ +static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data) +{ + yaffs_Device *dev = yaffs_SuperToDevice(sb); + + if (*flags & MS_RDONLY) { + struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_remount_fs: %s: RO\n"), dev->name)); + + yaffs_GrossLock(dev); + + yaffs_FlushSuperBlock(sb,1); + + if (mtd->sync) + mtd->sync(mtd); + + yaffs_GrossUnlock(dev); + } else { + T(YAFFS_TRACE_OS, + (TSTR("yaffs_remount_fs: %s: RW\n"), dev->name)); + } + + return 0; +} +#endif + +static void yaffs_put_super(struct super_block *sb) +{ + yaffs_Device *dev = yaffs_SuperToDevice(sb); + + T(YAFFS_TRACE_OS, (TSTR("yaffs_put_super\n"))); + + T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND, + (TSTR("Shutting down yaffs background thread\n"))); + yaffs_BackgroundStop(dev); + T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs background thread shut down\n"))); + + yaffs_GrossLock(dev); + + yaffs_FlushSuperBlock(sb,1); + + if (yaffs_DeviceToContext(dev)->putSuperFunc) + yaffs_DeviceToContext(dev)->putSuperFunc(sb); + + + yaffs_Deinitialise(dev); + + yaffs_GrossUnlock(dev); + + down(&yaffs_context_lock); + ylist_del_init(&(yaffs_DeviceToContext(dev)->contextList)); + up(&yaffs_context_lock); + + if (yaffs_DeviceToContext(dev)->spareBuffer) { + YFREE(yaffs_DeviceToContext(dev)->spareBuffer); + yaffs_DeviceToContext(dev)->spareBuffer = NULL; + } + + kfree(dev); +} + + +static void yaffs_MTDPutSuper(struct super_block *sb) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(yaffs_SuperToDevice(sb))->mtd; + + if (mtd->sync) + mtd->sync(mtd); + + put_mtd_device(mtd); +} + + +static void yaffs_MarkSuperBlockDirty(yaffs_Device *dev) +{ + struct super_block *sb = yaffs_DeviceToContext(dev)->superBlock; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_MarkSuperBlockDirty() sb = %p\n"), sb)); + if (sb) + sb->s_dirt = 1; +} + +typedef struct { + int inband_tags; + int skip_checkpoint_read; + int skip_checkpoint_write; + int no_cache; + int tags_ecc_on; + int tags_ecc_overridden; + int lazy_loading_enabled; + int lazy_loading_overridden; + int empty_lost_and_found; + int empty_lost_and_found_overridden; +} yaffs_options; + +#define MAX_OPT_LEN 30 +static int yaffs_parse_options(yaffs_options *options, const char *options_str) +{ + char cur_opt[MAX_OPT_LEN + 1]; + int p; + int error = 0; + + /* Parse through the options which is a comma seperated list */ + + while (options_str && *options_str && !error) { + memset(cur_opt, 0, MAX_OPT_LEN + 1); + p = 0; + + while(*options_str == ',') + options_str++; + + while (*options_str && *options_str != ',') { + if (p < MAX_OPT_LEN) { + cur_opt[p] = *options_str; + p++; + } + options_str++; + } + + if (!strcmp(cur_opt, "inband-tags")) + options->inband_tags = 1; + else if (!strcmp(cur_opt, "tags-ecc-off")){ + options->tags_ecc_on = 0; + options->tags_ecc_overridden=1; + } else if (!strcmp(cur_opt, "tags-ecc-on")){ + options->tags_ecc_on = 1; + options->tags_ecc_overridden = 1; + } else if 
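/* Illustrative mount invocation exercising the options parsed here
 * (device and mount point names are examples only):
 *
 *     mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblock3 /mnt/flash
 *
 * Options are comma separated; an unrecognised option is logged as a
 * "Bad mount option" and causes the mount to fail.
 */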
(!strcmp(cur_opt, "lazy-loading-off")){ + options->lazy_loading_enabled = 0; + options->lazy_loading_overridden=1; + } else if (!strcmp(cur_opt, "lazy-loading-on")){ + options->lazy_loading_enabled = 1; + options->lazy_loading_overridden = 1; + } else if (!strcmp(cur_opt, "empty-lost-and-found-off")){ + options->empty_lost_and_found = 0; + options->empty_lost_and_found_overridden=1; + } else if (!strcmp(cur_opt, "empty-lost-and-found-on")){ + options->empty_lost_and_found = 1; + options->empty_lost_and_found_overridden=1; + } else if (!strcmp(cur_opt, "no-cache")) + options->no_cache = 1; + else if (!strcmp(cur_opt, "no-checkpoint-read")) + options->skip_checkpoint_read = 1; + else if (!strcmp(cur_opt, "no-checkpoint-write")) + options->skip_checkpoint_write = 1; + else if (!strcmp(cur_opt, "no-checkpoint")) { + options->skip_checkpoint_read = 1; + options->skip_checkpoint_write = 1; + } else { + printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n", + cur_opt); + error = 1; + } + } + + return error; +} + +static struct super_block *yaffs_internal_read_super(int yaffsVersion, + struct super_block *sb, + void *data, int silent) +{ + int nBlocks; + struct inode *inode = NULL; + struct dentry *root; + yaffs_Device *dev = 0; + char devname_buf[BDEVNAME_SIZE + 1]; + struct mtd_info *mtd; + int err; + char *data_str = (char *)data; + struct yaffs_LinuxContext *context = NULL; + yaffs_DeviceParam *param; + + yaffs_options options; + + sb->s_magic = YAFFS_MAGIC; + sb->s_op = &yaffs_super_ops; + sb->s_flags |= MS_NOATIME; + +#ifdef YAFFS_COMPILE_EXPORTFS + sb->s_export_op = &yaffs_export_ops; +#endif + + if (!sb) + printk(KERN_INFO "yaffs: sb is NULL\n"); + else if (!sb->s_dev) + printk(KERN_INFO "yaffs: sb->s_dev is NULL\n"); + else if (!yaffs_devname(sb, devname_buf)) + printk(KERN_INFO "yaffs: devname is NULL\n"); + else + printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n", + sb->s_dev, + yaffs_devname(sb, devname_buf)); + + if (!data_str) + data_str = ""; + + printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str); + + memset(&options, 0, sizeof(options)); + + if (yaffs_parse_options(&options, data_str)) { + /* Option parsing failed */ + return NULL; + } + + + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_read_super: Using yaffs%d\n"), yaffsVersion)); + T(YAFFS_TRACE_OS, + (TSTR("yaffs_read_super: block size %d\n"), + (int)(sb->s_blocksize))); + + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: Attempting MTD mount of %u.%u,\"%s\"\n"), + MAJOR(sb->s_dev), MINOR(sb->s_dev), + yaffs_devname(sb, devname_buf))); + + /* Check it's an mtd device..... 
*/ + if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) + return NULL; /* This isn't an mtd device */ + + /* Get the device */ + mtd = get_mtd_device(NULL, MINOR(sb->s_dev)); + if (!mtd) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device #%u doesn't appear to exist\n"), + MINOR(sb->s_dev))); + return NULL; + } + /* Check it's NAND */ + if (mtd->type != MTD_NANDFLASH) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device is not NAND it's type %d\n"), + mtd->type)); + return NULL; + } + + T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->erase)); + T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->read)); + T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->write)); + T(YAFFS_TRACE_OS, (TSTR(" readoob %p\n"), mtd->read_oob)); + T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->write_oob)); + T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->block_isbad)); + T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->block_markbad)); + T(YAFFS_TRACE_OS, (TSTR(" %s %d\n"), WRITE_SIZE_STR, WRITE_SIZE(mtd))); + T(YAFFS_TRACE_OS, (TSTR(" oobsize %d\n"), mtd->oobsize)); + T(YAFFS_TRACE_OS, (TSTR(" erasesize %d\n"), mtd->erasesize)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) + T(YAFFS_TRACE_OS, (TSTR(" size %u\n"), mtd->size)); +#else + T(YAFFS_TRACE_OS, (TSTR(" size %lld\n"), mtd->size)); +#endif + +#ifdef CONFIG_YAFFS_AUTO_YAFFS2 + + if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: auto selecting yaffs2\n"))); + yaffsVersion = 2; + } + + /* Added NCB 26/5/2006 for completeness */ + if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: auto selecting yaffs1\n"))); + yaffsVersion = 1; + } + +#endif + + if (yaffsVersion == 2) { + /* Check for version 2 style functions */ + if (!mtd->erase || + !mtd->block_isbad || + !mtd->block_markbad || + !mtd->read || + !mtd->write || +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + !mtd->read_oob || !mtd->write_oob) { +#else + !mtd->write_ecc || + !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) { +#endif + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not support required " + "functions\n"))); + return NULL; + } + + if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE || + mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) && + !options.inband_tags) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not have the " + "right page sizes\n"))); + return NULL; + } + } else { + /* Check for V1 style functions */ + if (!mtd->erase || + !mtd->read || + !mtd->write || +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + !mtd->read_oob || !mtd->write_oob) { +#else + !mtd->write_ecc || + !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) { +#endif + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not support required " + "functions\n"))); + return NULL; + } + + if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK || + mtd->oobsize != YAFFS_BYTES_PER_SPARE) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not support have the " + "right page sizes\n"))); + return NULL; + } + } + + /* OK, so if we got here, we have an MTD that's NAND and looks + * like it has the right capabilities + * Set the yaffs_Device up for mtd + */ + + dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL); + context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL); + + if(!dev || !context ){ + if(dev) + kfree(dev); + if(context) + kfree(context); + dev = NULL; + context = NULL; + } + + if (!dev) { + /* Deep shit could not allocate device structure */ + T(YAFFS_TRACE_ALWAYS, + 
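/* Recap of the CONFIG_YAFFS_AUTO_YAFFS2 selection performed earlier in
 * this function (page size taken from WRITE_SIZE(mtd)):
 *
 *     mounted as yaffs,  page size >= 2048                 -> use yaffs2
 *     mounted as yaffs2, page size == 512, no inband tags  -> use yaffs1
 */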
(TSTR("yaffs_read_super: Failed trying to allocate " + "yaffs_Device. \n"))); + return NULL; + } + memset(dev, 0, sizeof(yaffs_Device)); + param = &(dev->param); + + memset(context,0,sizeof(struct yaffs_LinuxContext)); + dev->context = context; + YINIT_LIST_HEAD(&(context->contextList)); + context->dev = dev; + context->superBlock = sb; + + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + sb->s_fs_info = dev; +#else + sb->u.generic_sbp = dev; +#endif + + yaffs_DeviceToContext(dev)->mtd = mtd; + param->name = mtd->name; + + /* Set up the memory size parameters.... */ + + nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK)); + + param->startBlock = 0; + param->endBlock = nBlocks - 1; + param->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK; + param->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK; + param->nReservedBlocks = 5; + param->nShortOpCaches = (options.no_cache) ? 0 : 10; + param->inbandTags = options.inband_tags; + +#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD + param->disableLazyLoad = 1; +#endif + if(options.lazy_loading_overridden) + param->disableLazyLoad = !options.lazy_loading_enabled; + +#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC + param->noTagsECC = 1; +#endif + +#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND +#else + param->deferDirectoryUpdate = 1; +#endif + + if(options.tags_ecc_overridden) + param->noTagsECC = !options.tags_ecc_on; + +#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND + param->emptyLostAndFound = 1; +#endif + +#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING + param->refreshPeriod = 0; +#else + param->refreshPeriod = 500; +#endif + + if(options.empty_lost_and_found_overridden) + param->emptyLostAndFound = options.empty_lost_and_found; + + /* ... and the functions. */ + if (yaffsVersion == 2) { + param->writeChunkWithTagsToNAND = + nandmtd2_WriteChunkWithTagsToNAND; + param->readChunkWithTagsFromNAND = + nandmtd2_ReadChunkWithTagsFromNAND; + param->markNANDBlockBad = nandmtd2_MarkNANDBlockBad; + param->queryNANDBlock = nandmtd2_QueryNANDBlock; + yaffs_DeviceToContext(dev)->spareBuffer = YMALLOC(mtd->oobsize); + param->isYaffs2 = 1; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + param->totalBytesPerChunk = mtd->writesize; + param->nChunksPerBlock = mtd->erasesize / mtd->writesize; +#else + param->totalBytesPerChunk = mtd->oobblock; + param->nChunksPerBlock = mtd->erasesize / mtd->oobblock; +#endif + nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize); + + param->startBlock = 0; + param->endBlock = nBlocks - 1; + } else { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + /* use the MTD interface in yaffs_mtdif1.c */ + param->writeChunkWithTagsToNAND = + nandmtd1_WriteChunkWithTagsToNAND; + param->readChunkWithTagsFromNAND = + nandmtd1_ReadChunkWithTagsFromNAND; + param->markNANDBlockBad = nandmtd1_MarkNANDBlockBad; + param->queryNANDBlock = nandmtd1_QueryNANDBlock; +#else + param->writeChunkToNAND = nandmtd_WriteChunkToNAND; + param->readChunkFromNAND = nandmtd_ReadChunkFromNAND; +#endif + param->isYaffs2 = 0; + } + /* ... 
and common functions */ + param->eraseBlockInNAND = nandmtd_EraseBlockInNAND; + param->initialiseNAND = nandmtd_InitialiseNAND; + + yaffs_DeviceToContext(dev)->putSuperFunc = yaffs_MTDPutSuper; + + param->markSuperBlockDirty = yaffs_MarkSuperBlockDirty; + param->gcControl = yaffs_gc_control_callback; + + yaffs_DeviceToContext(dev)->superBlock= sb; + + +#ifndef CONFIG_YAFFS_DOES_ECC + param->useNANDECC = 1; +#endif + +#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES + param->wideTnodesDisabled = 1; +#endif + + param->skipCheckpointRead = options.skip_checkpoint_read; + param->skipCheckpointWrite = options.skip_checkpoint_write; + + /* we assume this is protected by lock_kernel() in mount/umount */ + down(&yaffs_context_lock); + ylist_add_tail(&(yaffs_DeviceToContext(dev)->contextList), &yaffs_context_list); + up(&yaffs_context_lock); + + /* Directory search handling...*/ + YINIT_LIST_HEAD(&(yaffs_DeviceToContext(dev)->searchContexts)); + param->removeObjectCallback = yaffs_RemoveObjectCallback; + + init_MUTEX(&(yaffs_DeviceToContext(dev)->grossLock)); + + yaffs_GrossLock(dev); + + err = yaffs_GutsInitialise(dev); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_read_super: guts initialised %s\n"), + (err == YAFFS_OK) ? "OK" : "FAILED")); + + if(err == YAFFS_OK) + yaffs_BackgroundStart(dev); + + if(!context->bgThread) + param->deferDirectoryUpdate = 0; + + + /* Release lock before yaffs_get_inode() */ + yaffs_GrossUnlock(dev); + + /* Create root inode */ + if (err == YAFFS_OK) + inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, + yaffs_Root(dev)); + + if (!inode) + return NULL; + + inode->i_op = &yaffs_dir_inode_operations; + inode->i_fop = &yaffs_dir_operations; + + T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: got root inode\n"))); + + root = d_alloc_root(inode); + + T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: d_alloc_root done\n"))); + + if (!root) { + iput(inode); + return NULL; + } + sb->s_root = root; + sb->s_dirt = !dev->isCheckpointed; + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs_read_super: isCheckpointed %d\n"), + dev->isCheckpointed)); + + T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: done\n"))); + return sb; +} + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data, + int silent) +{ + return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs_read_super(struct file_system_type *fs, + int flags, const char *dev_name, + void *data, struct vfsmount *mnt) +{ + + return get_sb_bdev(fs, flags, dev_name, data, + yaffs_internal_read_super_mtd, mnt); +} +#else +static struct super_block *yaffs_read_super(struct file_system_type *fs, + int flags, const char *dev_name, + void *data) +{ + + return get_sb_bdev(fs, flags, dev_name, data, + yaffs_internal_read_super_mtd); +} +#endif + +static struct file_system_type yaffs_fs_type = { + .owner = THIS_MODULE, + .name = "yaffs", + .get_sb = yaffs_read_super, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; +#else +static struct super_block *yaffs_read_super(struct super_block *sb, void *data, + int silent) +{ + return yaffs_internal_read_super(1, sb, data, silent); +} + +static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super, + FS_REQUIRES_DEV); +#endif + + +#ifdef CONFIG_YAFFS_YAFFS2 + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data, + int silent) +{ + return yaffs_internal_read_super(2, sb, data, silent) ? 
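/* Both registered filesystem types funnel into the same code; only the
 * requested version differs:
 *
 *     mount -t yaffs  ...   calls yaffs_internal_read_super(1, ...)
 *     mount -t yaffs2 ...   calls yaffs_internal_read_super(2, ...)
 *
 * (yaffs may still auto-select yaffs2 on large-page NAND, as above)
 */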
0 : -EINVAL; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +static int yaffs2_read_super(struct file_system_type *fs, + int flags, const char *dev_name, void *data, + struct vfsmount *mnt) +{ + return get_sb_bdev(fs, flags, dev_name, data, + yaffs2_internal_read_super_mtd, mnt); +} +#else +static struct super_block *yaffs2_read_super(struct file_system_type *fs, + int flags, const char *dev_name, + void *data) +{ + + return get_sb_bdev(fs, flags, dev_name, data, + yaffs2_internal_read_super_mtd); +} +#endif + +static struct file_system_type yaffs2_fs_type = { + .owner = THIS_MODULE, + .name = "yaffs2", + .get_sb = yaffs2_read_super, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; +#else +static struct super_block *yaffs2_read_super(struct super_block *sb, + void *data, int silent) +{ + return yaffs_internal_read_super(2, sb, data, silent); +} + +static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super, + FS_REQUIRES_DEV); +#endif + +#endif /* CONFIG_YAFFS_YAFFS2 */ + +static struct proc_dir_entry *my_proc_entry; +static struct proc_dir_entry *debug_proc_entry; + +static char *yaffs_dump_dev_part0(char *buf, yaffs_Device * dev) +{ + buf += sprintf(buf, "startBlock......... %d\n", dev->param.startBlock); + buf += sprintf(buf, "endBlock........... %d\n", dev->param.endBlock); + buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->param.totalBytesPerChunk); + buf += sprintf(buf, "useNANDECC......... %d\n", dev->param.useNANDECC); + buf += sprintf(buf, "noTagsECC.......... %d\n", dev->param.noTagsECC); + buf += sprintf(buf, "isYaffs2........... %d\n", dev->param.isYaffs2); + buf += sprintf(buf, "inbandTags......... %d\n", dev->param.inbandTags); + buf += sprintf(buf, "emptyLostAndFound.. %d\n", dev->param.emptyLostAndFound); + buf += sprintf(buf, "disableLazyLoad.... %d\n", dev->param.disableLazyLoad); + buf += sprintf(buf, "refreshPeriod...... %d\n", dev->param.refreshPeriod); + buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->param.nShortOpCaches); + buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->param.nReservedBlocks); + + buf += sprintf(buf, "\n"); + + return buf; +} + + +static char *yaffs_dump_dev_part1(char *buf, yaffs_Device * dev) +{ + buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk); + buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits); + buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize); + buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks); + buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint); + buf += sprintf(buf, "\n"); + buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated); + buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes); + buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated); + buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects); + buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks); + buf += sprintf(buf, "\n"); + buf += sprintf(buf, "nPageWrites........ %u\n", dev->nPageWrites); + buf += sprintf(buf, "nPageReads......... %u\n", dev->nPageReads); + buf += sprintf(buf, "nBlockErasures..... %u\n", dev->nBlockErasures); + buf += sprintf(buf, "nGCCopies.......... %u\n", dev->nGCCopies); + buf += sprintf(buf, "allGCs............. %u\n", dev->allGCs); + buf += sprintf(buf, "passiveGCs......... %u\n", dev->passiveGCs); + buf += sprintf(buf, "oldestDirtyGCs..... %u\n", dev->oldestDirtyGCs); + buf += sprintf(buf, "backgroundGCs...... 
%u\n", dev->backgroundGCs); + buf += sprintf(buf, "nRetriedWrites..... %u\n", dev->nRetriedWrites); + buf += sprintf(buf, "nRetireBlocks...... %u\n", dev->nRetiredBlocks); + buf += sprintf(buf, "eccFixed........... %u\n", dev->eccFixed); + buf += sprintf(buf, "eccUnfixed......... %u\n", dev->eccUnfixed); + buf += sprintf(buf, "tagsEccFixed....... %u\n", dev->tagsEccFixed); + buf += sprintf(buf, "tagsEccUnfixed..... %u\n", dev->tagsEccUnfixed); + buf += sprintf(buf, "cacheHits.......... %u\n", dev->cacheHits); + buf += sprintf(buf, "nDeletedFiles...... %u\n", dev->nDeletedFiles); + buf += sprintf(buf, "nUnlinkedFiles..... %u\n", dev->nUnlinkedFiles); + buf += sprintf(buf, "refreshCount....... %u\n", dev->refreshCount); + buf += + sprintf(buf, "nBackgroudDeletions %u\n", dev->nBackgroundDeletions); + + return buf; +} + +static int yaffs_proc_read(char *page, + char **start, + off_t offset, int count, int *eof, void *data) +{ + struct ylist_head *item; + char *buf = page; + int step = offset; + int n = 0; + + /* Get proc_file_read() to step 'offset' by one on each sucessive call. + * We use 'offset' (*ppos) to indicate where we are in devList. + * This also assumes the user has posted a read buffer large + * enough to hold the complete output; but that's life in /proc. + */ + + *(int *)start = 1; + + /* Print header first */ + if (step == 0) + buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__"\n"); + else if (step == 1) + buf += sprintf(buf,"\n"); + else { + step-=2; + + down(&yaffs_context_lock); + + /* Locate and print the Nth entry. Order N-squared but N is small. */ + ylist_for_each(item, &yaffs_context_list) { + struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList); + yaffs_Device *dev = dc->dev; + + if (n < (step & ~1)) { + n+=2; + continue; + } + if((step & 1)==0){ + buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->param.name); + buf = yaffs_dump_dev_part0(buf, dev); + } else + buf = yaffs_dump_dev_part1(buf, dev); + + break; + } + up(&yaffs_context_lock); + } + + return buf - page < count ? buf - page : count; +} + +static int yaffs_stats_proc_read(char *page, + char **start, + off_t offset, int count, int *eof, void *data) +{ + struct ylist_head *item; + char *buf = page; + int n = 0; + + down(&yaffs_context_lock); + + /* Locate and print the Nth entry. Order N-squared but N is small. */ + ylist_for_each(item, &yaffs_context_list) { + struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList); + yaffs_Device *dev = dc->dev; + + int erasedChunks; + int nObjects; + int nTnodes; + + erasedChunks = dev->nErasedBlocks * dev->param.nChunksPerBlock; + nObjects = dev->nObjectsCreated -dev->nFreeObjects; + nTnodes = dev->nTnodesCreated - dev->nFreeTnodes; + + + buf += sprintf(buf,"%d, %d, %d, %u, %u, %d, %d\n", + n, dev->nFreeChunks, erasedChunks, + dev->backgroundGCs, dev->oldestDirtyGCs, + nObjects, nTnodes); + } + up(&yaffs_context_lock); + + + return buf - page < count ? buf - page : count; +} + +/** + * Set the verbosity of the warnings and error messages. + * + * Note that the names can only be a..z or _ with the current code. 
+ */ + +static struct { + char *mask_name; + unsigned mask_bitfield; +} mask_flags[] = { + {"allocate", YAFFS_TRACE_ALLOCATE}, + {"always", YAFFS_TRACE_ALWAYS}, + {"background", YAFFS_TRACE_BACKGROUND}, + {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS}, + {"buffers", YAFFS_TRACE_BUFFERS}, + {"bug", YAFFS_TRACE_BUG}, + {"checkpt", YAFFS_TRACE_CHECKPOINT}, + {"deletion", YAFFS_TRACE_DELETION}, + {"erase", YAFFS_TRACE_ERASE}, + {"error", YAFFS_TRACE_ERROR}, + {"gc_detail", YAFFS_TRACE_GC_DETAIL}, + {"gc", YAFFS_TRACE_GC}, + {"lock", YAFFS_TRACE_LOCK}, + {"mtd", YAFFS_TRACE_MTD}, + {"nandaccess", YAFFS_TRACE_NANDACCESS}, + {"os", YAFFS_TRACE_OS}, + {"scan_debug", YAFFS_TRACE_SCAN_DEBUG}, + {"scan", YAFFS_TRACE_SCAN}, + {"tracing", YAFFS_TRACE_TRACING}, + {"sync", YAFFS_TRACE_SYNC}, + {"write", YAFFS_TRACE_WRITE}, + + {"verify", YAFFS_TRACE_VERIFY}, + {"verify_nand", YAFFS_TRACE_VERIFY_NAND}, + {"verify_full", YAFFS_TRACE_VERIFY_FULL}, + {"verify_all", YAFFS_TRACE_VERIFY_ALL}, + + {"all", 0xffffffff}, + {"none", 0}, + {NULL, 0}, +}; + +#define MAX_MASK_NAME_LENGTH 40 +static int yaffs_proc_write_trace_options(struct file *file, const char *buf, + unsigned long count, void *data) +{ + unsigned rg = 0, mask_bitfield; + char *end; + char *mask_name; + const char *x; + char substring[MAX_MASK_NAME_LENGTH + 1]; + int i; + int done = 0; + int add, len = 0; + int pos = 0; + + rg = yaffs_traceMask; + + while (!done && (pos < count)) { + done = 1; + while ((pos < count) && isspace(buf[pos])) + pos++; + + switch (buf[pos]) { + case '+': + case '-': + case '=': + add = buf[pos]; + pos++; + break; + + default: + add = ' '; + break; + } + mask_name = NULL; + + mask_bitfield = simple_strtoul(buf + pos, &end, 0); + + if (end > buf + pos) { + mask_name = "numeral"; + len = end - (buf + pos); + pos += len; + done = 0; + } else { + for (x = buf + pos, i = 0; + (*x == '_' || (*x >= 'a' && *x <= 'z')) && + i < MAX_MASK_NAME_LENGTH; x++, i++, pos++) + substring[i] = *x; + substring[i] = '\0'; + + for (i = 0; mask_flags[i].mask_name != NULL; i++) { + if (strcmp(substring, mask_flags[i].mask_name) == 0) { + mask_name = mask_flags[i].mask_name; + mask_bitfield = mask_flags[i].mask_bitfield; + done = 0; + break; + } + } + } + + if (mask_name != NULL) { + done = 0; + switch (add) { + case '-': + rg &= ~mask_bitfield; + break; + case '+': + rg |= mask_bitfield; + break; + case '=': + rg = mask_bitfield; + break; + default: + rg |= mask_bitfield; + break; + } + } + } + + yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS; + + printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask); + + if (rg & YAFFS_TRACE_ALWAYS) { + for (i = 0; mask_flags[i].mask_name != NULL; i++) { + char flag; + flag = ((rg & mask_flags[i].mask_bitfield) == + mask_flags[i].mask_bitfield) ? '+' : '-'; + printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name); + } + } + + return count; +} + + +static int yaffs_proc_write(struct file *file, const char *buf, + unsigned long count, void *data) +{ + return yaffs_proc_write_trace_options(file, buf, count, data); +} + +/* Stuff to handle installation of file systems */ +struct file_system_to_install { + struct file_system_type *fst; + int installed; +}; + +static struct file_system_to_install fs_to_install[] = { + {&yaffs_fs_type, 0}, + {&yaffs2_fs_type, 0}, + {NULL, 0} +}; + +static int __init init_yaffs_fs(void) +{ + int error = 0; + struct file_system_to_install *fsinst; + + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs built " __DATE__ " " __TIME__ " Installing. 
\n"))); + +#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED + T(YAFFS_TRACE_ALWAYS, + (TSTR(" \n\n\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n\n"))); +#endif + + + + + init_MUTEX(&yaffs_context_lock); + + /* Install the proc_fs entries */ + my_proc_entry = create_proc_entry("yaffs", + S_IRUGO | S_IFREG, + YPROC_ROOT); + + if (my_proc_entry) { + my_proc_entry->write_proc = yaffs_proc_write; + my_proc_entry->read_proc = yaffs_proc_read; + my_proc_entry->data = NULL; + } else + return -ENOMEM; + + debug_proc_entry = create_proc_entry("yaffs_stats", + S_IRUGO | S_IFREG, + YPROC_ROOT); + + if (debug_proc_entry) { + debug_proc_entry->write_proc = NULL; + debug_proc_entry->read_proc = yaffs_stats_proc_read; + debug_proc_entry->data = NULL; + } else + return -ENOMEM; + + /* Now add the file system entries */ + + fsinst = fs_to_install; + + while (fsinst->fst && !error) { + error = register_filesystem(fsinst->fst); + if (!error) + fsinst->installed = 1; + fsinst++; + } + + /* Any errors? uninstall */ + if (error) { + fsinst = fs_to_install; + + while (fsinst->fst) { + if (fsinst->installed) { + unregister_filesystem(fsinst->fst); + fsinst->installed = 0; + } + fsinst++; + } + } + + return error; +} + +static void __exit exit_yaffs_fs(void) +{ + + struct file_system_to_install *fsinst; + + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs built " __DATE__ " " __TIME__ " removing. \n"))); + + remove_proc_entry("yaffs", YPROC_ROOT); + remove_proc_entry("yaffs_stats", YPROC_ROOT); + + fsinst = fs_to_install; + + while (fsinst->fst) { + if (fsinst->installed) { + unregister_filesystem(fsinst->fst); + fsinst->installed = 0; + } + fsinst++; + } +} + +module_init(init_yaffs_fs) +module_exit(exit_yaffs_fs) + +MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system"); +MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010"); +MODULE_LICENSE("GPL"); diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h new file mode 100644 index 000000000000..71fe1deea3c6 --- /dev/null +++ b/fs/yaffs2/yaffs_getblockinfo.h @@ -0,0 +1,35 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_GETBLOCKINFO_H__ +#define __YAFFS_GETBLOCKINFO_H__ + +#include "yaffs_guts.h" +#include "yaffs_trace.h" + +/* Function to manipulate block info */ +static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk) +{ + if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR), + blk)); + YBUG(); + } + return &dev->blockInfo[blk - dev->internalStartBlock]; +} + +#endif diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c new file mode 100644 index 000000000000..9509dd401a33 --- /dev/null +++ b/fs/yaffs2/yaffs_guts.c @@ -0,0 +1,8237 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. 
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include "yportenv.h" +#include "yaffs_trace.h" + +#include "yaffsinterface.h" +#include "yaffs_guts.h" +#include "yaffs_tagsvalidity.h" +#include "yaffs_getblockinfo.h" + +#include "yaffs_tagscompat.h" +#ifndef CONFIG_YAFFS_USE_OWN_SORT +#include "yaffs_qsort.h" +#endif +#include "yaffs_nand.h" + +#include "yaffs_checkptrw.h" + +#include "yaffs_nand.h" +#include "yaffs_packedtags2.h" + + +/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */ +#define YAFFS_GC_GOOD_ENOUGH 2 +#define YAFFS_GC_PASSIVE_THRESHOLD 4 + +#define YAFFS_SMALL_HOLE_THRESHOLD 3 + +/* + * Checkpoints are really no benefit on very small partitions. + * + * To save space on small partitions don't bother with checkpoints unless + * the partition is at least this big. + */ +#define YAFFS_CHECKPOINT_MIN_BLOCKS 60 + +#include "yaffs_ecc.h" + + +/* Robustification (if it ever comes about...) */ +static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND); +static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND, + int erasedOk); +static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *tags); +static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND, + const yaffs_ExtendedTags *tags); + +/* Other local prototypes */ +static void yaffs_UpdateParent(yaffs_Object *obj); +static int yaffs_UnlinkObject(yaffs_Object *obj); +static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj); + +static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList); + +static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev, + const __u8 *buffer, + yaffs_ExtendedTags *tags, + int useReserve); +static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode, + int chunkInNAND, int inScan); + +static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number, + yaffs_ObjectType type); +static void yaffs_AddObjectToDirectory(yaffs_Object *directory, + yaffs_Object *obj); +static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, + int force, int isShrink, int shadows); +static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj); +static int yaffs_CheckStructures(void); +static int yaffs_DoGenericObjectDeletion(yaffs_Object *in); + +static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo); + + +static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev, + int chunkInNAND); + +static int yaffs_UnlinkWorker(yaffs_Object *obj); + +static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId, + int chunkInObject); + +static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve, + yaffs_BlockInfo **blockUsedPtr); + +static void yaffs_VerifyFreeChunks(yaffs_Device *dev); + +static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in); + +static void yaffs_VerifyDirectory(yaffs_Object *directory); +#ifdef YAFFS_PARANOID +static int yaffs_CheckFileSanity(yaffs_Object *in); +#else +#define yaffs_CheckFileSanity(in) +#endif + +static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in); +static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId); + +static void yaffs_InvalidateCheckpoint(yaffs_Device *dev); + +static int 
yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode, + yaffs_ExtendedTags *tags); + +static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, + unsigned pos); +static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev, + yaffs_FileStructure *fStruct, + __u32 chunkId); + +static int yaffs_HandleHole(yaffs_Object *obj, loff_t newSize); +static void yaffs_SkipRestOfBlock(yaffs_Device *dev); +static int yaffs_VerifyChunkWritten(yaffs_Device *dev, + int chunkInNAND, + const __u8 *data, + yaffs_ExtendedTags *tags); + +/* Function to calculate chunk and offset */ + +static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut, + __u32 *offsetOut) +{ + int chunk; + __u32 offset; + + chunk = (__u32)(addr >> dev->chunkShift); + + if (dev->chunkDiv == 1) { + /* easy power of 2 case */ + offset = (__u32)(addr & dev->chunkMask); + } else { + /* Non power-of-2 case */ + + loff_t chunkBase; + + chunk /= dev->chunkDiv; + + chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk; + offset = (__u32)(addr - chunkBase); + } + + *chunkOut = chunk; + *offsetOut = offset; +} + +/* Function to return the number of shifts for a power of 2 greater than or + * equal to the given number + * Note we don't try to cater for all possible numbers and this does not have to + * be hellishly efficient. + */ + +static __u32 ShiftsGE(__u32 x) +{ + int extraBits; + int nShifts; + + nShifts = extraBits = 0; + + while (x > 1) { + if (x & 1) + extraBits++; + x >>= 1; + nShifts++; + } + + if (extraBits) + nShifts++; + + return nShifts; +} + +/* Function to return the number of shifts to get a 1 in bit 0 + */ + +static __u32 Shifts(__u32 x) +{ + __u32 nShifts; + + nShifts = 0; + + if (!x) + return 0; + + while (!(x&1)) { + x >>= 1; + nShifts++; + } + + return nShifts; +} + + + +/* + * Temporary buffer manipulations. + */ + +static int yaffs_InitialiseTempBuffers(yaffs_Device *dev) +{ + int i; + __u8 *buf = (__u8 *)1; + + memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer)); + + for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) { + dev->tempBuffer[i].line = 0; /* not in use */ + dev->tempBuffer[i].buffer = buf = + YMALLOC_DMA(dev->param.totalBytesPerChunk); + } + + return buf ? YAFFS_OK : YAFFS_FAIL; +} + +__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo) +{ + int i, j; + + dev->tempInUse++; + if (dev->tempInUse > dev->maxTemp) + dev->maxTemp = dev->tempInUse; + + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { + if (dev->tempBuffer[i].line == 0) { + dev->tempBuffer[i].line = lineNo; + if ((i + 1) > dev->maxTemp) { + dev->maxTemp = i + 1; + for (j = 0; j <= i; j++) + dev->tempBuffer[j].maxLine = + dev->tempBuffer[j].line; + } + + return dev->tempBuffer[i].buffer; + } + } + + T(YAFFS_TRACE_BUFFERS, + (TSTR("Out of temp buffers at line %d, other held by lines:"), + lineNo)); + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) + T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line)); + + T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR))); + + /* + * If we got here then we have to allocate an unmanaged one + * This is not good. + */ + + dev->unmanagedTempAllocations++; + return YMALLOC(dev->nDataBytesPerChunk); + +} + +void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, + int lineNo) +{ + int i; + + dev->tempInUse--; + + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { + if (dev->tempBuffer[i].buffer == buffer) { + dev->tempBuffer[i].line = 0; + return; + } + } + + if (buffer) { + /* assume it is an unmanaged one. 
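 *
 * (Typical managed usage, as seen throughout this file:
 *
 *      __u8 *buf = yaffs_GetTempBuffer(dev, __LINE__);
 *      ...use buf for up to one chunk of data...
 *      yaffs_ReleaseTempBuffer(dev, buf, __LINE__);
 *
 *  Only YAFFS_N_TEMP_BUFFERS buffers are pre-allocated; when they are
 *  all busy yaffs_GetTempBuffer falls back to a plain YMALLOC, and that
 *  unmanaged case is what is being freed here.)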
*/ + T(YAFFS_TRACE_BUFFERS, + (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR), + lineNo)); + YFREE(buffer); + dev->unmanagedTempDeallocations++; + } + +} + +/* + * Determine if we have a managed buffer. + */ +int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer) +{ + int i; + + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { + if (dev->tempBuffer[i].buffer == buffer) + return 1; + } + + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].data == buffer) + return 1; + } + + if (buffer == dev->checkpointBuffer) + return 1; + + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR))); + return 0; +} + + + +/* + * Chunk bitmap manipulations + */ + +static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk) +{ + if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) { + T(YAFFS_TRACE_ERROR, + (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR), + blk)); + YBUG(); + } + return dev->chunkBits + + (dev->chunkBitmapStride * (blk - dev->internalStartBlock)); +} + +static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk) +{ + if (blk < dev->internalStartBlock || blk > dev->internalEndBlock || + chunk < 0 || chunk >= dev->param.nChunksPerBlock) { + T(YAFFS_TRACE_ERROR, + (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR), + blk, chunk)); + YBUG(); + } +} + +static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + + memset(blkBits, 0, dev->chunkBitmapStride); +} + +static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + + yaffs_VerifyChunkBitId(dev, blk, chunk); + + blkBits[chunk / 8] &= ~(1 << (chunk & 7)); +} + +static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + + yaffs_VerifyChunkBitId(dev, blk, chunk); + + blkBits[chunk / 8] |= (1 << (chunk & 7)); +} + +static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + yaffs_VerifyChunkBitId(dev, blk, chunk); + + return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 
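/* One bit per chunk, chunkBitmapStride bytes per block, starting at
 * internalStartBlock.  Illustrative example assuming 64 chunks per
 * block (stride 8 bytes): chunk 13 of block b is
 *
 *     byte  chunkBits[8 * (b - internalStartBlock) + 13 / 8]    byte 1
 *     bit   1 << (13 & 7)                                       bit 5
 */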
1 : 0; +} + +static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + int i; + for (i = 0; i < dev->chunkBitmapStride; i++) { + if (*blkBits) + return 1; + blkBits++; + } + return 0; +} + +static int yaffs_CountChunkBits(yaffs_Device *dev, int blk) +{ + __u8 *blkBits = yaffs_BlockBits(dev, blk); + int i; + int n = 0; + for (i = 0; i < dev->chunkBitmapStride; i++) { + __u8 x = *blkBits; + while (x) { + if (x & 1) + n++; + x >>= 1; + } + + blkBits++; + } + return n; +} + +/* + * Verification code + */ + +static int yaffs_SkipVerification(yaffs_Device *dev) +{ + dev=dev; + return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL)); +} + +static int yaffs_SkipFullVerification(yaffs_Device *dev) +{ + dev=dev; + return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL)); +} + +static int yaffs_SkipNANDVerification(yaffs_Device *dev) +{ + dev=dev; + return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND)); +} + +static const char *blockStateName[] = { +"Unknown", +"Needs scanning", +"Scanning", +"Empty", +"Allocating", +"Full", +"Dirty", +"Checkpoint", +"Collecting", +"Dead" +}; + +static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n) +{ + int actuallyUsed; + int inUse; + + if (yaffs_SkipVerification(dev)) + return; + + /* Report illegal runtime states */ + if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES) + T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState)); + + switch (bi->blockState) { + case YAFFS_BLOCK_STATE_UNKNOWN: + case YAFFS_BLOCK_STATE_SCANNING: + case YAFFS_BLOCK_STATE_NEEDS_SCANNING: + T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR), + n, blockStateName[bi->blockState])); + } + + /* Check pages in use and soft deletions are legal */ + + actuallyUsed = bi->pagesInUse - bi->softDeletions; + + if (bi->pagesInUse < 0 || bi->pagesInUse > dev->param.nChunksPerBlock || + bi->softDeletions < 0 || bi->softDeletions > dev->param.nChunksPerBlock || + actuallyUsed < 0 || actuallyUsed > dev->param.nChunksPerBlock) + T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR), + n, bi->pagesInUse, bi->softDeletions)); + + + /* Check chunk bitmap legal */ + inUse = yaffs_CountChunkBits(dev, n); + if (inUse != bi->pagesInUse) + T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR), + n, bi->pagesInUse, inUse)); + + /* Check that the sequence number is valid. 
+ * Ten million is legal, but is very unlikely + */ + if (dev->param.isYaffs2 && + (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) && + (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000)) + T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR), + n, bi->sequenceNumber)); +} + +static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, + int n) +{ + yaffs_VerifyBlock(dev, bi, n); + + /* After collection the block should be in the erased state */ + /* This will need to change if we do partial gc */ + + if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && + bi->blockState != YAFFS_BLOCK_STATE_EMPTY) { + T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR), + n, bi->blockState)); + } +} + +static void yaffs_VerifyBlocks(yaffs_Device *dev) +{ + int i; + int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES]; + int nIllegalBlockStates = 0; + + if (yaffs_SkipVerification(dev)) + return; + + memset(nBlocksPerState, 0, sizeof(nBlocksPerState)); + + for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) { + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i); + yaffs_VerifyBlock(dev, bi, i); + + if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES) + nBlocksPerState[bi->blockState]++; + else + nIllegalBlockStates++; + } + + T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR))); + T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR))); + + T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates)); + if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1) + T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR))); + + for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++) + T(YAFFS_TRACE_VERIFY, + (TSTR("%s %d blocks"TENDSTR), + blockStateName[i], nBlocksPerState[i])); + + if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]) + T(YAFFS_TRACE_VERIFY, + (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR), + dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])); + + if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]) + T(YAFFS_TRACE_VERIFY, + (TSTR("Erased block count wrong dev %d count %d"TENDSTR), + dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])); + + if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1) + T(YAFFS_TRACE_VERIFY, + (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR), + nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING])); + + T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR))); + +} + +/* + * Verify the object header. oh must be valid, but obj and tags may be NULL in which + * case those tests will not be performed. + */ +static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck) +{ + if (obj && yaffs_SkipVerification(obj->myDev)) + return; + + if (!(tags && obj && oh)) { + T(YAFFS_TRACE_VERIFY, + (TSTR("Verifying object header tags %p obj %p oh %p"TENDSTR), + tags, obj, oh)); + return; + } + + if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN || + oh->type > YAFFS_OBJECT_TYPE_MAX) + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR), + tags->objectId, oh->type)); + + if (tags->objectId != obj->objectId) + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header mismatch objectId %d"TENDSTR), + tags->objectId, obj->objectId)); + + + /* + * Check that the object's parent ids match if parentCheck requested. + * + * Tests do not apply to the root object. 
+ */ + + if (parentCheck && tags->objectId > 1 && !obj->parent) + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR), + tags->objectId, oh->parentObjectId)); + + if (parentCheck && obj->parent && + oh->parentObjectId != obj->parent->objectId && + (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED || + obj->parent->objectId != YAFFS_OBJECTID_DELETED)) + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR), + tags->objectId, oh->parentObjectId, obj->parent->objectId)); + + if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */ + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header name is NULL"TENDSTR), + obj->objectId)); + + if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */ + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d header name is 0xFF"TENDSTR), + obj->objectId)); +} + + +#if 0 +/* Not being used, but don't want to throw away yet */ +static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn, + __u32 level, int chunkOffset) +{ + int i; + yaffs_Device *dev = obj->myDev; + int ok = 1; + + if (tn) { + if (level > 0) { + + for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) { + if (tn->internal[i]) { + ok = yaffs_VerifyTnodeWorker(obj, + tn->internal[i], + level - 1, + (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i); + } + } + } else if (level == 0) { + yaffs_ExtendedTags tags; + __u32 objectId = obj->objectId; + + chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS; + + for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) { + __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i); + + if (theChunk > 0) { + /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */ + yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags); + if (tags.objectId != objectId || tags.chunkId != chunkOffset) { + T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR), + objectId, chunkOffset, theChunk, + tags.objectId, tags.chunkId)); + } + } + chunkOffset++; + } + } + } + + return ok; + +} + +#endif + +static void yaffs_VerifyFile(yaffs_Object *obj) +{ + int requiredTallness; + int actualTallness; + __u32 lastChunk; + __u32 x; + __u32 i; + yaffs_Device *dev; + yaffs_ExtendedTags tags; + yaffs_Tnode *tn; + __u32 objectId; + + if (!obj) + return; + + if (yaffs_SkipVerification(obj->myDev)) + return; + + dev = obj->myDev; + objectId = obj->objectId; + + /* Check file size is consistent with tnode depth */ + lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1; + x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS; + requiredTallness = 0; + while (x > 0) { + x >>= YAFFS_TNODES_INTERNAL_BITS; + requiredTallness++; + } + + actualTallness = obj->variant.fileVariant.topLevel; + + /* Check that the chunks in the tnode tree are all correct. + * We do this by scanning through the tnode tree and + * checking the tags for every chunk match. 
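 *
 * (requiredTallness above is the number of internal tnode levels needed
 *  to reach lastChunk.  Rough worked example, assuming the usual
 *  16-entry level-0 tnodes and 8-way internal tnodes: a 10 MB file on
 *  2 KiB chunks gives lastChunk = 10485760/2048 + 1 = 5121,
 *  5121 >> 4 = 320, and 320 -> 40 -> 5 -> 0 takes three shifts, so
 *  three internal levels are required.)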
+ */ + + if (yaffs_SkipNANDVerification(dev)) + return; + + for (i = 1; i <= lastChunk; i++) { + tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i); + + if (tn) { + __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i); + if (theChunk > 0) { + /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */ + yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags); + if (tags.objectId != objectId || tags.chunkId != i) { + T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR), + objectId, i, theChunk, + tags.objectId, tags.chunkId)); + } + } + } + } +} + + +static void yaffs_VerifyHardLink(yaffs_Object *obj) +{ + if (obj && yaffs_SkipVerification(obj->myDev)) + return; + + /* Verify sane equivalent object */ +} + +static void yaffs_VerifySymlink(yaffs_Object *obj) +{ + if (obj && yaffs_SkipVerification(obj->myDev)) + return; + + /* Verify symlink string */ +} + +static void yaffs_VerifySpecial(yaffs_Object *obj) +{ + if (obj && yaffs_SkipVerification(obj->myDev)) + return; +} + +static void yaffs_VerifyObject(yaffs_Object *obj) +{ + yaffs_Device *dev; + + __u32 chunkMin; + __u32 chunkMax; + + __u32 chunkIdOk; + __u32 chunkInRange; + __u32 chunkShouldNotBeDeleted; + __u32 chunkValid; + + if (!obj) + return; + + if (obj->beingCreated) + return; + + dev = obj->myDev; + + if (yaffs_SkipVerification(dev)) + return; + + /* Check sane object header chunk */ + + chunkMin = dev->internalStartBlock * dev->param.nChunksPerBlock; + chunkMax = (dev->internalEndBlock+1) * dev->param.nChunksPerBlock - 1; + + chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax); + chunkIdOk = chunkInRange || (obj->hdrChunk == 0); + chunkValid = chunkInRange && + yaffs_CheckChunkBit(dev, + obj->hdrChunk / dev->param.nChunksPerBlock, + obj->hdrChunk % dev->param.nChunksPerBlock); + chunkShouldNotBeDeleted = chunkInRange && !chunkValid; + + if (!obj->fake && + (!chunkIdOk || chunkShouldNotBeDeleted)) { + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d has chunkId %d %s %s"TENDSTR), + obj->objectId, obj->hdrChunk, + chunkIdOk ? "" : ",out of range", + chunkShouldNotBeDeleted ? 
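/* hdrChunk is a flat chunk number across the partition; the range and
 * bitmap checks above split it with nChunksPerBlock.  For example, with
 * 64 chunks per block (illustrative), hdrChunk 1000 is chunk
 * 1000 % 64 = 40 of block 1000 / 64 = 15.
 */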
",marked as deleted" : "")); + } + + if (chunkValid && !yaffs_SkipNANDVerification(dev)) { + yaffs_ExtendedTags tags; + yaffs_ObjectHeader *oh; + __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__); + + oh = (yaffs_ObjectHeader *)buffer; + + yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer, + &tags); + + yaffs_VerifyObjectHeader(obj, oh, &tags, 1); + + yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); + } + + /* Verify it has a parent */ + if (obj && !obj->fake && + (!obj->parent || obj->parent->myDev != dev)) { + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR), + obj->objectId, obj->parent)); + } + + /* Verify parent is a directory */ + if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR), + obj->objectId, obj->parent->variantType)); + } + + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + yaffs_VerifyFile(obj); + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + yaffs_VerifySymlink(obj); + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + yaffs_VerifyDirectory(obj); + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + yaffs_VerifyHardLink(obj); + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + yaffs_VerifySpecial(obj); + break; + case YAFFS_OBJECT_TYPE_UNKNOWN: + default: + T(YAFFS_TRACE_VERIFY, + (TSTR("Obj %d has illegaltype %d"TENDSTR), + obj->objectId, obj->variantType)); + break; + } +} + +static void yaffs_VerifyObjects(yaffs_Device *dev) +{ + yaffs_Object *obj; + int i; + struct ylist_head *lh; + + if (yaffs_SkipVerification(dev)) + return; + + /* Iterate through the objects in each hash entry */ + + for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { + ylist_for_each(lh, &dev->objectBucket[i].list) { + if (lh) { + obj = ylist_entry(lh, yaffs_Object, hashLink); + yaffs_VerifyObject(obj); + } + } + } +} + + +/* + * Simple hash function. Needs to have a reasonable spread + */ + +static Y_INLINE int yaffs_HashFunction(int n) +{ + n = abs(n); + return n % YAFFS_NOBJECT_BUCKETS; +} + +/* + * Access functions to useful fake objects. + * Note that root might have a presence in NAND if permissions are set. 
+ */ + +yaffs_Object *yaffs_Root(yaffs_Device *dev) +{ + return dev->rootDir; +} + +yaffs_Object *yaffs_LostNFound(yaffs_Device *dev) +{ + return dev->lostNFoundDir; +} + + +/* + * Erased NAND checking functions + */ + +int yaffs_CheckFF(__u8 *buffer, int nBytes) +{ + /* Horrible, slow implementation */ + while (nBytes--) { + if (*buffer != 0xFF) + return 0; + buffer++; + } + return 1; +} + +static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev, + int chunkInNAND) +{ + int retval = YAFFS_OK; + __u8 *data = yaffs_GetTempBuffer(dev, __LINE__); + yaffs_ExtendedTags tags; + int result; + + result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags); + + if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR) + retval = YAFFS_FAIL; + + if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) { + T(YAFFS_TRACE_NANDACCESS, + (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND)); + retval = YAFFS_FAIL; + } + + yaffs_ReleaseTempBuffer(dev, data, __LINE__); + + return retval; + +} + + +static int yaffs_VerifyChunkWritten(yaffs_Device *dev, + int chunkInNAND, + const __u8 *data, + yaffs_ExtendedTags *tags) +{ + int retval = YAFFS_OK; + yaffs_ExtendedTags tempTags; + __u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__); + int result; + + result = yaffs_ReadChunkWithTagsFromNAND(dev,chunkInNAND,buffer,&tempTags); + if(memcmp(buffer,data,dev->nDataBytesPerChunk) || + tempTags.objectId != tags->objectId || + tempTags.chunkId != tags->chunkId || + tempTags.byteCount != tags->byteCount) + retval = YAFFS_FAIL; + + yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); + + return retval; +} + +static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev, + const __u8 *data, + yaffs_ExtendedTags *tags, + int useReserve) +{ + int attempts = 0; + int writeOk = 0; + int chunk; + + yaffs_InvalidateCheckpoint(dev); + + do { + yaffs_BlockInfo *bi = 0; + int erasedOk = 0; + + chunk = yaffs_AllocateChunk(dev, useReserve, &bi); + if (chunk < 0) { + /* no space */ + break; + } + + /* First check this chunk is erased, if it needs + * checking. The checking policy (unless forced + * always on) is as follows: + * + * Check the first page we try to write in a block. + * If the check passes then we don't need to check any + * more. If the check fails, we check again... + * If the block has been erased, we don't need to check. + * + * However, if the block has been prioritised for gc, + * then we think there might be something odd about + * this block and stop using it. + * + * Rationale: We should only ever see chunks that have + * not been erased if there was a partially written + * chunk due to power loss. This checking policy should + * catch that case with very few checks and thus save a + * lot of checks that are most likely not needed. + * + * Mods to the above + * If an erase check fails or the write fails we skip the + * rest of the block. 
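 *
 * In outline, the retry loop below behaves like:
 *
 *     chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
 *     if (!bi->skipErasedCheck && chunk is not erased)
 *             delete it, skip the rest of the block, retry;
 *     write the chunk; if (!bi->skipErasedCheck) read back and compare;
 *     if the write or verify failed
 *             yaffs_HandleWriteChunkError(), retry;
 *     else
 *             bi->skipErasedCheck = 1 for the rest of this block;
 *
 * bounded by yaffs_wr_attempts retries when that limit is positive.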
+ */ + + /* let's give it a try */ + attempts++; + +#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED + bi->skipErasedCheck = 0; +#endif + if (!bi->skipErasedCheck) { + erasedOk = yaffs_CheckChunkErased(dev, chunk); + if (erasedOk != YAFFS_OK) { + T(YAFFS_TRACE_ERROR, + (TSTR("**>> yaffs chunk %d was not erased" + TENDSTR), chunk)); + + /* If not erased, delete this one, + * skip rest of block and + * try another chunk */ + yaffs_DeleteChunk(dev,chunk,1,__LINE__); + yaffs_SkipRestOfBlock(dev); + continue; + } + } + + writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk, + data, tags); + + if(!bi->skipErasedCheck) + writeOk = yaffs_VerifyChunkWritten(dev, chunk, data, tags); + + if (writeOk != YAFFS_OK) { + /* Clean up aborted write, skip to next block and + * try another chunk */ + yaffs_HandleWriteChunkError(dev, chunk, erasedOk); + continue; + } + + bi->skipErasedCheck = 1; + + /* Copy the data into the robustification buffer */ + yaffs_HandleWriteChunkOk(dev, chunk, data, tags); + + } while (writeOk != YAFFS_OK && + (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts)); + + if (!writeOk) + chunk = -1; + + if (attempts > 1) { + T(YAFFS_TRACE_ERROR, + (TSTR("**>> yaffs write required %d attempts" TENDSTR), + attempts)); + + dev->nRetriedWrites += (attempts - 1); + } + + return chunk; +} + + +/* + * Oldest Dirty Sequence Number handling. + */ + +/* yaffs_CalcOldestDirtySequence() + * yaffs_FindOldestDirtySequence() + * Calculate the oldest dirty sequence number if we don't know it. + */ +static void yaffs_CalcOldestDirtySequence(yaffs_Device *dev) +{ + int i; + unsigned seq; + unsigned blockNo = 0; + yaffs_BlockInfo *b; + + if(!dev->param.isYaffs2) + return; + + /* Find the oldest dirty sequence number. */ + seq = dev->sequenceNumber + 1; + b = dev->blockInfo; + for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) { + if (b->blockState == YAFFS_BLOCK_STATE_FULL && + (b->pagesInUse - b->softDeletions) < dev->param.nChunksPerBlock && + b->sequenceNumber < seq) { + seq = b->sequenceNumber; + blockNo = i; + } + b++; + } + + if(blockNo){ + dev->oldestDirtySequence = seq; + dev->oldestDirtyBlock = blockNo; + } + +} + + +static void yaffs_FindOldestDirtySequence(yaffs_Device *dev) +{ + if(dev->param.isYaffs2 && !dev->oldestDirtySequence) + yaffs_CalcOldestDirtySequence(dev); +} + +/* + * yaffs_ClearOldestDirtySequence() + * Called when a block is erased or marked bad. (ie. when its sequenceNumber + * becomes invalid). If the value matches the oldest then we clear + * dev->oldestDirtySequence to force its recomputation. + */ +static void yaffs_ClearOldestDirtySequence(yaffs_Device *dev, yaffs_BlockInfo *bi) +{ + + if(!dev->param.isYaffs2) + return; + + if(!bi || bi->sequenceNumber == dev->oldestDirtySequence){ + dev->oldestDirtySequence = 0; + dev->oldestDirtyBlock = 0; + } +} + +/* + * yaffs_UpdateOldestDirtySequence() + * Update the oldest dirty sequence number whenever we dirty a block. + * Only do this if the oldestDirtySequence is actually being tracked. + */ +static void yaffs_UpdateOldestDirtySequence(yaffs_Device *dev, unsigned blockNo, yaffs_BlockInfo *bi) +{ + if(dev->param.isYaffs2 && dev->oldestDirtySequence){ + if(dev->oldestDirtySequence > bi->sequenceNumber){ + dev->oldestDirtySequence = bi->sequenceNumber; + dev->oldestDirtyBlock = blockNo; + } + } +} + +/* + * Block retiring for handling a broken block. 
+ */ + +static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND) +{ + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND); + + yaffs_InvalidateCheckpoint(dev); + + yaffs_ClearOldestDirtySequence(dev,bi); + + if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) { + if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) { + T(YAFFS_TRACE_ALWAYS, (TSTR( + "yaffs: Failed to mark bad and erase block %d" + TENDSTR), blockInNAND)); + } else { + yaffs_ExtendedTags tags; + int chunkId = blockInNAND * dev->param.nChunksPerBlock; + + __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__); + + memset(buffer, 0xff, dev->nDataBytesPerChunk); + yaffs_InitialiseTags(&tags); + tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK; + if (dev->param.writeChunkWithTagsToNAND(dev, chunkId - + dev->chunkOffset, buffer, &tags) != YAFFS_OK) + T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to " + TCONT("write bad block marker to block %d") + TENDSTR), blockInNAND)); + + yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); + } + } + + bi->blockState = YAFFS_BLOCK_STATE_DEAD; + bi->gcPrioritise = 0; + bi->needsRetiring = 0; + + dev->nRetiredBlocks++; +} + +/* + * Functions for robustisizing TODO + * + */ + +static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *tags) +{ + dev=dev; + chunkInNAND=chunkInNAND; + data=data; + tags=tags; +} + +static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND, + const yaffs_ExtendedTags *tags) +{ + dev=dev; + chunkInNAND=chunkInNAND; + tags=tags; +} + +void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi) +{ + if (!bi->gcPrioritise) { + bi->gcPrioritise = 1; + dev->hasPendingPrioritisedGCs = 1; + bi->chunkErrorStrikes++; + + if (bi->chunkErrorStrikes > 3) { + bi->needsRetiring = 1; /* Too many stikes, so retire this */ + T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR))); + + } + } +} + +static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND, + int erasedOk) +{ + int blockInNAND = chunkInNAND / dev->param.nChunksPerBlock; + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND); + + yaffs_HandleChunkError(dev, bi); + + if (erasedOk) { + /* Was an actual write failure, so mark the block for retirement */ + bi->needsRetiring = 1; + T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, + (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND)); + } + + /* Delete the chunk */ + yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__); + yaffs_SkipRestOfBlock(dev); +} + + +/*---------------- Name handling functions ------------*/ + +static __u16 yaffs_CalcNameSum(const YCHAR *name) +{ + __u16 sum = 0; + __u16 i = 1; + + const YUCHAR *bname = (const YUCHAR *) name; + if (bname) { + while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) { + +#ifdef CONFIG_YAFFS_CASE_INSENSITIVE + sum += yaffs_toupper(*bname) * i; +#else + sum += (*bname) * i; +#endif + i++; + bname++; + } + } + return sum; +} + +static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name) +{ +#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM + memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1)); + if (name && yaffs_strnlen(name,YAFFS_SHORT_NAME_LENGTH+1) <= YAFFS_SHORT_NAME_LENGTH) + yaffs_strcpy(obj->shortName, name); + else + obj->shortName[0] = _Y('\0'); +#endif + obj->sum = yaffs_CalcNameSum(name); +} + +/*-------------------- TNODES ------------------- + + * List of spare tnodes + * The list is hooked together using the first pointer + * in the tnode. 
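+ *
+ * That is, free tnodes are chained through internal[0]; the memory they
+ * were carved from is also kept on dev->allocatedTnodeList so it can be
+ * released again in yaffs_DeinitialiseTnodes().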
+ */ + +/* yaffs_CreateTnodes creates a bunch more tnodes and + * adds them to the tnode free list. + * Don't use this function directly + */ +static Y_INLINE int yaffs_CalcTnodeSize(yaffs_Device *dev) +{ + int tnodeSize; + /* Calculate the tnode size in bytes for variable width tnode support. + * Must be a multiple of 32-bits */ + tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8; + + if (tnodeSize < sizeof(yaffs_Tnode)) + tnodeSize = sizeof(yaffs_Tnode); + return tnodeSize; +} + +static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes) +{ + int i; + int tnodeSize = yaffs_CalcTnodeSize(dev); + yaffs_Tnode *newTnodes; + __u8 *mem; + yaffs_Tnode *curr; + yaffs_Tnode *next; + yaffs_TnodeList *tnl; + + if (nTnodes < 1) + return YAFFS_OK; + + + /* make these things */ + + newTnodes = YMALLOC(nTnodes * tnodeSize); + mem = (__u8 *)newTnodes; + + if (!newTnodes) { + T(YAFFS_TRACE_ERROR, + (TSTR("yaffs: Could not allocate Tnodes" TENDSTR))); + return YAFFS_FAIL; + } + + /* Hook them into the free list */ +#if 0 + for (i = 0; i < nTnodes - 1; i++) { + newTnodes[i].internal[0] = &newTnodes[i + 1]; +#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG + newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1; +#endif + } + + newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes; +#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG + newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1; +#endif + dev->freeTnodes = newTnodes; +#else + /* New hookup for wide tnodes */ + for (i = 0; i < nTnodes - 1; i++) { + curr = (yaffs_Tnode *) &mem[i * tnodeSize]; + next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize]; + curr->internal[0] = next; + } + + curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize]; + curr->internal[0] = dev->freeTnodes; + dev->freeTnodes = (yaffs_Tnode *)mem; + +#endif + + + dev->nFreeTnodes += nTnodes; + dev->nTnodesCreated += nTnodes; + + /* Now add this bunch of tnodes to a list for freeing up. + * NB If we can't add this to the management list it isn't fatal + * but it just means we can't free this bunch of tnodes later. + */ + + tnl = YMALLOC(sizeof(yaffs_TnodeList)); + if (!tnl) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs: Could not add tnodes to management list" TENDSTR))); + return YAFFS_FAIL; + } else { + tnl->tnodes = newTnodes; + tnl->next = dev->allocatedTnodeList; + dev->allocatedTnodeList = tnl; + } + + T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR))); + + return YAFFS_OK; +} + +/* GetTnode gets us a clean tnode. 
Tries to make allocate more if we run out */ + +static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev) +{ + yaffs_Tnode *tn = NULL; + +#ifdef CONFIG_YAFFS_VALGRIND_TEST + tn = YMALLOC(yaffs_CalcTnodeSize(dev)); + if(tn) + dev->nTnodesCreated++; +#else + /* If there are none left make more */ + if (!dev->freeTnodes) + yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES); + + if (dev->freeTnodes) { + tn = dev->freeTnodes; +#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG + if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) { + /* Hoosterman, this thing looks like it isn't in the list */ + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: Tnode list bug 1" TENDSTR))); + } +#endif + dev->freeTnodes = dev->freeTnodes->internal[0]; + dev->nFreeTnodes--; + } +#endif + dev->nCheckpointBlocksRequired = 0; /* force recalculation*/ + + return tn; +} + +static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev) +{ + yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev); + int tnodeSize = yaffs_CalcTnodeSize(dev); + + if (tn) + memset(tn, 0, tnodeSize); + + return tn; +} + +/* FreeTnode frees up a tnode and puts it back on the free list */ +static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn) +{ + if (tn) { +#ifdef CONFIG_YAFFS_VALGRIND_TEST + YFREE(tn); + dev->nTnodesCreated--; +#else +#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG + if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) { + /* Hoosterman, this thing looks like it is already in the list */ + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: Tnode list bug 2" TENDSTR))); + } + tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1; +#endif + tn->internal[0] = dev->freeTnodes; + dev->freeTnodes = tn; + dev->nFreeTnodes++; +#endif + } + dev->nCheckpointBlocksRequired = 0; /* force recalculation*/ +} + +static void yaffs_DeinitialiseTnodes(yaffs_Device *dev) +{ + /* Free the list of allocated tnodes */ + yaffs_TnodeList *tmp; + + while (dev->allocatedTnodeList) { + tmp = dev->allocatedTnodeList->next; + + YFREE(dev->allocatedTnodeList->tnodes); + YFREE(dev->allocatedTnodeList); + dev->allocatedTnodeList = tmp; + + } + + dev->freeTnodes = NULL; + dev->nFreeTnodes = 0; + dev->nTnodesCreated = 0; +} + +static void yaffs_InitialiseTnodes(yaffs_Device *dev) +{ + dev->allocatedTnodeList = NULL; + dev->freeTnodes = NULL; + dev->nFreeTnodes = 0; + dev->nTnodesCreated = 0; +} + + +void yaffs_LoadLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, + unsigned val) +{ + __u32 *map = (__u32 *)tn; + __u32 bitInMap; + __u32 bitInWord; + __u32 wordInMap; + __u32 mask; + + pos &= YAFFS_TNODES_LEVEL0_MASK; + val >>= dev->chunkGroupBits; + + bitInMap = pos * dev->tnodeWidth; + wordInMap = bitInMap / 32; + bitInWord = bitInMap & (32 - 1); + + mask = dev->tnodeMask << bitInWord; + + map[wordInMap] &= ~mask; + map[wordInMap] |= (mask & (val << bitInWord)); + + if (dev->tnodeWidth > (32 - bitInWord)) { + bitInWord = (32 - bitInWord); + wordInMap++;; + mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord); + map[wordInMap] &= ~mask; + map[wordInMap] |= (mask & (val >> bitInWord)); + } +} + +static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, + unsigned pos) +{ + __u32 *map = (__u32 *)tn; + __u32 bitInMap; + __u32 bitInWord; + __u32 wordInMap; + __u32 val; + + pos &= YAFFS_TNODES_LEVEL0_MASK; + + bitInMap = pos * dev->tnodeWidth; + wordInMap = bitInMap / 32; + bitInWord = bitInMap & (32 - 1); + + val = map[wordInMap] >> bitInWord; + + if (dev->tnodeWidth > (32 - bitInWord)) { + bitInWord = (32 - bitInWord); + wordInMap++;; + val |= (map[wordInMap] << bitInWord); + } + + val &= 
dev->tnodeMask; + val <<= dev->chunkGroupBits; + + return val; +} + +/* ------------------- End of individual tnode manipulation -----------------*/ + +/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------ + * The look up tree is represented by the top tnode and the number of topLevel + * in the tree. 0 means only the level 0 tnode is in the tree. + */ + +/* FindLevel0Tnode finds the level 0 tnode, if one exists. */ +static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev, + yaffs_FileStructure *fStruct, + __u32 chunkId) +{ + yaffs_Tnode *tn = fStruct->top; + __u32 i; + int requiredTallness; + int level = fStruct->topLevel; + + dev=dev; + + /* Check sane level and chunk Id */ + if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) + return NULL; + + if (chunkId > YAFFS_MAX_CHUNK_ID) + return NULL; + + /* First check we're tall enough (ie enough topLevel) */ + + i = chunkId >> YAFFS_TNODES_LEVEL0_BITS; + requiredTallness = 0; + while (i) { + i >>= YAFFS_TNODES_INTERNAL_BITS; + requiredTallness++; + } + + if (requiredTallness > fStruct->topLevel) + return NULL; /* Not tall enough, so we can't find it */ + + /* Traverse down to level 0 */ + while (level > 0 && tn) { + tn = tn->internal[(chunkId >> + (YAFFS_TNODES_LEVEL0_BITS + + (level - 1) * + YAFFS_TNODES_INTERNAL_BITS)) & + YAFFS_TNODES_INTERNAL_MASK]; + level--; + } + + return tn; +} + +/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree. + * This happens in two steps: + * 1. If the tree isn't tall enough, then make it taller. + * 2. Scan down the tree towards the level 0 tnode adding tnodes if required. + * + * Used when modifying the tree. + * + * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will + * be plugged into the ttree. 
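+ *
+ * Illustration (assuming the default 16-entry level 0 tnodes and 8-way
+ * internal tnodes): chunkIds 0..15 need topLevel 0, 16..127 need
+ * topLevel 1, 128..1023 need topLevel 2, and so on - which is what the
+ * requiredTallness loop below computes.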
+ */ + +static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev, + yaffs_FileStructure *fStruct, + __u32 chunkId, + yaffs_Tnode *passedTn) +{ + int requiredTallness; + int i; + int l; + yaffs_Tnode *tn; + + __u32 x; + + + /* Check sane level and page Id */ + if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) + return NULL; + + if (chunkId > YAFFS_MAX_CHUNK_ID) + return NULL; + + /* First check we're tall enough (ie enough topLevel) */ + + x = chunkId >> YAFFS_TNODES_LEVEL0_BITS; + requiredTallness = 0; + while (x) { + x >>= YAFFS_TNODES_INTERNAL_BITS; + requiredTallness++; + } + + + if (requiredTallness > fStruct->topLevel) { + /* Not tall enough, gotta make the tree taller */ + for (i = fStruct->topLevel; i < requiredTallness; i++) { + + tn = yaffs_GetTnode(dev); + + if (tn) { + tn->internal[0] = fStruct->top; + fStruct->top = tn; + fStruct->topLevel++; + } else { + T(YAFFS_TRACE_ERROR, + (TSTR("yaffs: no more tnodes" TENDSTR))); + return NULL; + } + } + } + + /* Traverse down to level 0, adding anything we need */ + + l = fStruct->topLevel; + tn = fStruct->top; + + if (l > 0) { + while (l > 0 && tn) { + x = (chunkId >> + (YAFFS_TNODES_LEVEL0_BITS + + (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) & + YAFFS_TNODES_INTERNAL_MASK; + + + if ((l > 1) && !tn->internal[x]) { + /* Add missing non-level-zero tnode */ + tn->internal[x] = yaffs_GetTnode(dev); + if(!tn->internal[x]) + return NULL; + } else if (l == 1) { + /* Looking from level 1 at level 0 */ + if (passedTn) { + /* If we already have one, then release it.*/ + if (tn->internal[x]) + yaffs_FreeTnode(dev, tn->internal[x]); + tn->internal[x] = passedTn; + + } else if (!tn->internal[x]) { + /* Don't have one, none passed in */ + tn->internal[x] = yaffs_GetTnode(dev); + if(!tn->internal[x]) + return NULL; + } + } + + tn = tn->internal[x]; + l--; + } + } else { + /* top is level 0 */ + if (passedTn) { + memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8); + yaffs_FreeTnode(dev, passedTn); + } + } + + return tn; +} + +static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk, + yaffs_ExtendedTags *tags, int objectId, + int chunkInInode) +{ + int j; + + for (j = 0; theChunk && j < dev->chunkGroupSize; j++) { + if (yaffs_CheckChunkBit(dev, theChunk / dev->param.nChunksPerBlock, + theChunk % dev->param.nChunksPerBlock)) { + + if(dev->chunkGroupSize == 1) + return theChunk; + else { + yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, + tags); + if (yaffs_TagsMatch(tags, objectId, chunkInInode)) { + /* found it; */ + return theChunk; + } + } + } + theChunk++; + } + return -1; +} + +#if 0 +/* Experimental code not being used yet. Might speed up file deletion */ +/* DeleteWorker scans backwards through the tnode tree and deletes all the + * chunks and tnodes in the file. + * Returns 1 if the tree was deleted. + * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete. 
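+ * (This routine is currently compiled out with #if 0; soft deletion via
+ * yaffs_SoftDeleteWorker() below is what is actually used.)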
+ */ + +static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level, + int chunkOffset, int *limit) +{ + int i; + int chunkInInode; + int theChunk; + yaffs_ExtendedTags tags; + int foundChunk; + yaffs_Device *dev = in->myDev; + + int allDone = 1; + + if (tn) { + if (level > 0) { + for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0; + i--) { + if (tn->internal[i]) { + if (limit && (*limit) < 0) { + allDone = 0; + } else { + allDone = + yaffs_DeleteWorker(in, + tn-> + internal + [i], + level - + 1, + (chunkOffset + << + YAFFS_TNODES_INTERNAL_BITS) + + i, + limit); + } + if (allDone) { + yaffs_FreeTnode(dev, + tn-> + internal[i]); + tn->internal[i] = NULL; + } + } + } + return (allDone) ? 1 : 0; + } else if (level == 0) { + int hitLimit = 0; + + for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit; + i--) { + theChunk = yaffs_GetChunkGroupBase(dev, tn, i); + if (theChunk) { + + chunkInInode = (chunkOffset << + YAFFS_TNODES_LEVEL0_BITS) + i; + + foundChunk = + yaffs_FindChunkInGroup(dev, + theChunk, + &tags, + in->objectId, + chunkInInode); + + if (foundChunk > 0) { + yaffs_DeleteChunk(dev, + foundChunk, 1, + __LINE__); + in->nDataChunks--; + if (limit) { + *limit = *limit - 1; + if (*limit <= 0) + hitLimit = 1; + } + + } + + yaffs_LoadLevel0Tnode(dev, tn, i, 0); + } + + } + return (i < 0) ? 1 : 0; + + } + + } + + return 1; + +} + +#endif + +static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk) +{ + yaffs_BlockInfo *theBlock; + unsigned blockNo; + + T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk)); + + blockNo = chunk / dev->param.nChunksPerBlock; + theBlock = yaffs_GetBlockInfo(dev, blockNo); + if (theBlock) { + theBlock->softDeletions++; + dev->nFreeChunks++; + yaffs_UpdateOldestDirtySequence(dev, blockNo, theBlock); + } +} + +/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file. + * All soft deleting does is increment the block's softdelete count and pulls the chunk out + * of the tnode. + * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted. + */ + +static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, + __u32 level, int chunkOffset) +{ + int i; + int theChunk; + int allDone = 1; + yaffs_Device *dev = in->myDev; + + if (tn) { + if (level > 0) { + + for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0; + i--) { + if (tn->internal[i]) { + allDone = + yaffs_SoftDeleteWorker(in, + tn-> + internal[i], + level - 1, + (chunkOffset + << + YAFFS_TNODES_INTERNAL_BITS) + + i); + if (allDone) { + yaffs_FreeTnode(dev, + tn-> + internal[i]); + tn->internal[i] = NULL; + } else { + /* Hoosterman... how could this happen? */ + } + } + } + return (allDone) ? 1 : 0; + } else if (level == 0) { + + for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) { + theChunk = yaffs_GetChunkGroupBase(dev, tn, i); + if (theChunk) { + /* Note this does not find the real chunk, only the chunk group. + * We make an assumption that a chunk group is not larger than + * a block. 
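+					 * (Soft deletion therefore only bumps the counters of
+					 * the block that holds the chunk group base.)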
+ */ + yaffs_SoftDeleteChunk(dev, theChunk); + yaffs_LoadLevel0Tnode(dev, tn, i, 0); + } + + } + return 1; + + } + + } + + return 1; + +} + +static void yaffs_SoftDeleteFile(yaffs_Object *obj) +{ + if (obj->deleted && + obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) { + if (obj->nDataChunks <= 0) { + /* Empty file with no duplicate object headers, just delete it immediately */ + yaffs_FreeTnode(obj->myDev, + obj->variant.fileVariant.top); + obj->variant.fileVariant.top = NULL; + T(YAFFS_TRACE_TRACING, + (TSTR("yaffs: Deleting empty file %d" TENDSTR), + obj->objectId)); + yaffs_DoGenericObjectDeletion(obj); + } else { + yaffs_SoftDeleteWorker(obj, + obj->variant.fileVariant.top, + obj->variant.fileVariant. + topLevel, 0); + obj->softDeleted = 1; + } + } +} + +/* Pruning removes any part of the file structure tree that is beyond the + * bounds of the file (ie that does not point to chunks). + * + * A file should only get pruned when its size is reduced. + * + * Before pruning, the chunks must be pulled from the tree and the + * level 0 tnode entries must be zeroed out. + * Could also use this for file deletion, but that's probably better handled + * by a special case. + * + * This function is recursive. For levels > 0 the function is called again on + * any sub-tree. For level == 0 we just check if the sub-tree has data. + * If there is no data in a subtree then it is pruned. + */ + +static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn, + __u32 level, int del0) +{ + int i; + int hasData; + + if (tn) { + hasData = 0; + + if(level > 0){ + for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) { + if (tn->internal[i]) { + tn->internal[i] = + yaffs_PruneWorker(dev, tn->internal[i], + level - 1, + (i == 0) ? del0 : 1); + } + + if (tn->internal[i]) + hasData++; + } + } else { + int tnodeSize_u32 = yaffs_CalcTnodeSize(dev)/sizeof(__u32); + __u32 *map = (__u32 *)tn; + + for(i = 0; !hasData && i < tnodeSize_u32; i++){ + if(map[i]) + hasData++; + } + } + + if (hasData == 0 && del0) { + /* Free and return NULL */ + + yaffs_FreeTnode(dev, tn); + tn = NULL; + } + + } + + return tn; + +} + +static int yaffs_PruneFileStructure(yaffs_Device *dev, + yaffs_FileStructure *fStruct) +{ + int i; + int hasData; + int done = 0; + yaffs_Tnode *tn; + + if (fStruct->topLevel > 0) { + fStruct->top = + yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0); + + /* Now we have a tree with all the non-zero branches NULL but the height + * is the same as it was. + * Let's see if we can trim internal tnodes to shorten the tree. + * We can do this if only the 0th element in the tnode is in use + * (ie all the non-zero are NULL) + */ + + while (fStruct->topLevel && !done) { + tn = fStruct->top; + + hasData = 0; + for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) { + if (tn->internal[i]) + hasData++; + } + + if (!hasData) { + fStruct->top = tn->internal[0]; + fStruct->topLevel--; + yaffs_FreeTnode(dev, tn); + } else { + done = 1; + } + } + } + + return YAFFS_OK; +} + +/*-------------------- End of File Structure functions.-------------------*/ + +/* yaffs_CreateFreeObjects creates a bunch more objects and + * adds them to the object free list. 
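+ * Free objects are chained through siblings.next, and the backing
+ * allocation is recorded on dev->allocatedObjectList so it can be freed
+ * in yaffs_DeinitialiseObjects().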
+ */ +static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects) +{ + int i; + yaffs_Object *newObjects; + yaffs_ObjectList *list; + + if (nObjects < 1) + return YAFFS_OK; + + /* make these things */ + newObjects = YMALLOC(nObjects * sizeof(yaffs_Object)); + list = YMALLOC(sizeof(yaffs_ObjectList)); + + if (!newObjects || !list) { + if (newObjects){ + YFREE(newObjects); + newObjects = NULL; + } + if (list){ + YFREE(list); + list = NULL; + } + T(YAFFS_TRACE_ALLOCATE, + (TSTR("yaffs: Could not allocate more objects" TENDSTR))); + return YAFFS_FAIL; + } + + /* Hook them into the free list */ + for (i = 0; i < nObjects - 1; i++) { + newObjects[i].siblings.next = + (struct ylist_head *)(&newObjects[i + 1]); + } + + newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects; + dev->freeObjects = newObjects; + dev->nFreeObjects += nObjects; + dev->nObjectsCreated += nObjects; + + /* Now add this bunch of Objects to a list for freeing up. */ + + list->objects = newObjects; + list->next = dev->allocatedObjectList; + dev->allocatedObjectList = list; + + return YAFFS_OK; +} + + +/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */ +static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev) +{ + yaffs_Object *tn = NULL; + +#ifdef CONFIG_YAFFS_VALGRIND_TEST + tn = YMALLOC(sizeof(yaffs_Object)); + if(tn) + dev->nObjectsCreated++; +#else + /* If there are none left make more */ + if (!dev->freeObjects) + yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS); + + if (dev->freeObjects) { + tn = dev->freeObjects; + dev->freeObjects = + (yaffs_Object *) (dev->freeObjects->siblings.next); + dev->nFreeObjects--; + } +#endif + if (tn) { + /* Now sweeten it up... */ + + memset(tn, 0, sizeof(yaffs_Object)); + tn->beingCreated = 1; + + tn->myDev = dev; + tn->hdrChunk = 0; + tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN; + YINIT_LIST_HEAD(&(tn->hardLinks)); + YINIT_LIST_HEAD(&(tn->hashLink)); + YINIT_LIST_HEAD(&tn->siblings); + + + /* Now make the directory sane */ + if (dev->rootDir) { + tn->parent = dev->rootDir; + ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children); + } + + /* Add it to the lost and found directory. + * NB Can't put root or lostNFound in lostNFound so + * check if lostNFound exists first + */ + if (dev->lostNFoundDir) + yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn); + + tn->beingCreated = 0; + } + + dev->nCheckpointBlocksRequired = 0; /* force recalculation*/ + + return tn; +} + +static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number, + __u32 mode) +{ + + yaffs_Object *obj = + yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY); + if (obj) { + obj->fake = 1; /* it is fake so it might have no NAND presence... */ + obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */ + obj->unlinkAllowed = 0; /* ... or unlink it */ + obj->deleted = 0; + obj->unlinked = 0; + obj->yst_mode = mode; + obj->myDev = dev; + obj->hdrChunk = 0; /* Not a valid chunk. 
*/ + } + + return obj; + +} + +static void yaffs_UnhashObject(yaffs_Object *tn) +{ + int bucket; + yaffs_Device *dev = tn->myDev; + + /* If it is still linked into the bucket list, free from the list */ + if (!ylist_empty(&tn->hashLink)) { + ylist_del_init(&tn->hashLink); + bucket = yaffs_HashFunction(tn->objectId); + dev->objectBucket[bucket].count--; + } +} + +/* FreeObject frees up a Object and puts it back on the free list */ +static void yaffs_FreeObject(yaffs_Object *tn) +{ + yaffs_Device *dev = tn->myDev; + + T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode)); + + if (!tn) + YBUG(); + if (tn->parent) + YBUG(); + if (!ylist_empty(&tn->siblings)) + YBUG(); + + + if (tn->myInode) { + /* We're still hooked up to a cached inode. + * Don't delete now, but mark for later deletion + */ + tn->deferedFree = 1; + return; + } + + yaffs_UnhashObject(tn); + +#ifdef CONFIG_YAFFS_VALGRIND_TEST + YFREE(tn); + dev->nObjectsCreated--; + tn = NULL; +#else + /* Link into the free list. */ + tn->siblings.next = (struct ylist_head *)(dev->freeObjects); + dev->freeObjects = tn; + dev->nFreeObjects++; +#endif + dev->nCheckpointBlocksRequired = 0; /* force recalculation*/ +} + + +void yaffs_HandleDeferedFree(yaffs_Object *obj) +{ + if (obj->deferedFree) + yaffs_FreeObject(obj); +} + + +static void yaffs_DeinitialiseObjects(yaffs_Device *dev) +{ + /* Free the list of allocated Objects */ + + yaffs_ObjectList *tmp; + + while (dev->allocatedObjectList) { + tmp = dev->allocatedObjectList->next; + YFREE(dev->allocatedObjectList->objects); + YFREE(dev->allocatedObjectList); + + dev->allocatedObjectList = tmp; + } + + dev->freeObjects = NULL; + dev->nFreeObjects = 0; + dev->nObjectsCreated = 0; +} + +static void yaffs_InitialiseObjects(yaffs_Device *dev) +{ + int i; + + dev->allocatedObjectList = NULL; + dev->freeObjects = NULL; + dev->nFreeObjects = 0; + + for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { + YINIT_LIST_HEAD(&dev->objectBucket[i].list); + dev->objectBucket[i].count = 0; + } +} + +static int yaffs_FindNiceObjectBucket(yaffs_Device *dev) +{ + int i; + int l = 999; + int lowest = 999999; + + + /* Search for the shortest list or one that + * isn't too long. + */ + + for (i = 0; i < 10 && lowest > 4; i++) { + dev->bucketFinder++; + dev->bucketFinder %= YAFFS_NOBJECT_BUCKETS; + if (dev->objectBucket[dev->bucketFinder].count < lowest) { + lowest = dev->objectBucket[dev->bucketFinder].count; + l = dev->bucketFinder; + } + + } + + return l; +} + +static int yaffs_CreateNewObjectNumber(yaffs_Device *dev) +{ + int bucket = yaffs_FindNiceObjectBucket(dev); + + /* Now find an object value that has not already been taken + * by scanning the list. 
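+	 * Candidates are generated by stepping n in increments of
+	 * YAFFS_NOBJECT_BUCKETS, so every candidate hashes back to the chosen
+	 * bucket and only that bucket's list needs to be searched.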
+ */ + + int found = 0; + struct ylist_head *i; + + __u32 n = (__u32) bucket; + + /* yaffs_CheckObjectHashSanity(); */ + + while (!found) { + found = 1; + n += YAFFS_NOBJECT_BUCKETS; + if (1 || dev->objectBucket[bucket].count > 0) { + ylist_for_each(i, &dev->objectBucket[bucket].list) { + /* If there is already one in the list */ + if (i && ylist_entry(i, yaffs_Object, + hashLink)->objectId == n) { + found = 0; + } + } + } + } + + return n; +} + +static void yaffs_HashObject(yaffs_Object *in) +{ + int bucket = yaffs_HashFunction(in->objectId); + yaffs_Device *dev = in->myDev; + + ylist_add(&in->hashLink, &dev->objectBucket[bucket].list); + dev->objectBucket[bucket].count++; +} + +yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number) +{ + int bucket = yaffs_HashFunction(number); + struct ylist_head *i; + yaffs_Object *in; + + ylist_for_each(i, &dev->objectBucket[bucket].list) { + /* Look if it is in the list */ + if (i) { + in = ylist_entry(i, yaffs_Object, hashLink); + if (in->objectId == number) { + + /* Don't tell the VFS about this one if it is defered free */ + if (in->deferedFree) + return NULL; + + return in; + } + } + } + + return NULL; +} + +yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number, + yaffs_ObjectType type) +{ + yaffs_Object *theObject=NULL; + yaffs_Tnode *tn = NULL; + + if (number < 0) + number = yaffs_CreateNewObjectNumber(dev); + + if (type == YAFFS_OBJECT_TYPE_FILE) { + tn = yaffs_GetTnode(dev); + if (!tn) + return NULL; + } + + theObject = yaffs_AllocateEmptyObject(dev); + if (!theObject){ + if(tn) + yaffs_FreeTnode(dev,tn); + return NULL; + } + + + if (theObject) { + theObject->fake = 0; + theObject->renameAllowed = 1; + theObject->unlinkAllowed = 1; + theObject->objectId = number; + yaffs_HashObject(theObject); + theObject->variantType = type; +#ifdef CONFIG_YAFFS_WINCE + yfsd_WinFileTimeNow(theObject->win_atime); + theObject->win_ctime[0] = theObject->win_mtime[0] = + theObject->win_atime[0]; + theObject->win_ctime[1] = theObject->win_mtime[1] = + theObject->win_atime[1]; + +#else + + theObject->yst_atime = theObject->yst_mtime = + theObject->yst_ctime = Y_CURRENT_TIME; +#endif + switch (type) { + case YAFFS_OBJECT_TYPE_FILE: + theObject->variant.fileVariant.fileSize = 0; + theObject->variant.fileVariant.scannedFileSize = 0; + theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF; /* max __u32 */ + theObject->variant.fileVariant.topLevel = 0; + theObject->variant.fileVariant.top = tn; + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + YINIT_LIST_HEAD(&theObject->variant.directoryVariant. + children); + YINIT_LIST_HEAD(&theObject->variant.directoryVariant. 
+ dirty); + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + case YAFFS_OBJECT_TYPE_HARDLINK: + case YAFFS_OBJECT_TYPE_SPECIAL: + /* No action required */ + break; + case YAFFS_OBJECT_TYPE_UNKNOWN: + /* todo this should not happen */ + break; + } + } + + return theObject; +} + +static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev, + int number, + yaffs_ObjectType type) +{ + yaffs_Object *theObject = NULL; + + if (number > 0) + theObject = yaffs_FindObjectByNumber(dev, number); + + if (!theObject) + theObject = yaffs_CreateNewObject(dev, number, type); + + return theObject; + +} + + +static YCHAR *yaffs_CloneString(const YCHAR *str) +{ + YCHAR *newStr = NULL; + int len; + + if (!str) + str = _Y(""); + + len = yaffs_strnlen(str,YAFFS_MAX_ALIAS_LENGTH); + newStr = YMALLOC((len + 1) * sizeof(YCHAR)); + if (newStr){ + yaffs_strncpy(newStr, str,len); + newStr[len] = 0; + } + return newStr; + +} + +/* + * Mknod (create) a new object. + * equivalentObject only has meaning for a hard link; + * aliasString only has meaning for a symlink. + * rdev only has meaning for devices (a subset of special objects) + */ + +static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type, + yaffs_Object *parent, + const YCHAR *name, + __u32 mode, + __u32 uid, + __u32 gid, + yaffs_Object *equivalentObject, + const YCHAR *aliasString, __u32 rdev) +{ + yaffs_Object *in; + YCHAR *str = NULL; + + yaffs_Device *dev = parent->myDev; + + /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/ + if (yaffs_FindObjectByName(parent, name)) + return NULL; + + if (type == YAFFS_OBJECT_TYPE_SYMLINK) { + str = yaffs_CloneString(aliasString); + if (!str) + return NULL; + } + + in = yaffs_CreateNewObject(dev, -1, type); + + if (!in){ + if(str) + YFREE(str); + return NULL; + } + + + + + + if (in) { + in->hdrChunk = 0; + in->valid = 1; + in->variantType = type; + + in->yst_mode = mode; + +#ifdef CONFIG_YAFFS_WINCE + yfsd_WinFileTimeNow(in->win_atime); + in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0]; + in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1]; + +#else + in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME; + + in->yst_rdev = rdev; + in->yst_uid = uid; + in->yst_gid = gid; +#endif + in->nDataChunks = 0; + + yaffs_SetObjectName(in, name); + in->dirty = 1; + + yaffs_AddObjectToDirectory(parent, in); + + in->myDev = parent->myDev; + + switch (type) { + case YAFFS_OBJECT_TYPE_SYMLINK: + in->variant.symLinkVariant.alias = str; + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + in->variant.hardLinkVariant.equivalentObject = + equivalentObject; + in->variant.hardLinkVariant.equivalentObjectId = + equivalentObject->objectId; + ylist_add(&in->hardLinks, &equivalentObject->hardLinks); + break; + case YAFFS_OBJECT_TYPE_FILE: + case YAFFS_OBJECT_TYPE_DIRECTORY: + case YAFFS_OBJECT_TYPE_SPECIAL: + case YAFFS_OBJECT_TYPE_UNKNOWN: + /* do nothing */ + break; + } + + if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) { + /* Could not create the object header, fail the creation */ + yaffs_DeleteObject(in); + in = NULL; + } + + yaffs_UpdateParent(parent); + } + + return in; +} + +yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid) +{ + return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode, + uid, gid, NULL, NULL, 0); +} + +yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid) +{ + return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name, + 
mode, uid, gid, NULL, NULL, 0); +} + +yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid, __u32 rdev) +{ + return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode, + uid, gid, NULL, NULL, rdev); +} + +yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid, + const YCHAR *alias) +{ + return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode, + uid, gid, NULL, alias, 0); +} + +/* yaffs_Link returns the object id of the equivalent object.*/ +yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name, + yaffs_Object *equivalentObject) +{ + /* Get the real object in case we were fed a hard link as an equivalent object */ + equivalentObject = yaffs_GetEquivalentObject(equivalentObject); + + if (yaffs_MknodObject + (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0, + equivalentObject, NULL, 0)) { + return equivalentObject; + } else { + return NULL; + } + +} + +static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir, + const YCHAR *newName, int force, int shadows) +{ + int unlinkOp; + int deleteOp; + + yaffs_Object *existingTarget; + + if (newDir == NULL) + newDir = obj->parent; /* use the old directory */ + + if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: yaffs_ChangeObjectName: newDir is not a directory" + TENDSTR))); + YBUG(); + } + + /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */ + if (obj->myDev->param.isYaffs2) + unlinkOp = (newDir == obj->myDev->unlinkedDir); + else + unlinkOp = (newDir == obj->myDev->unlinkedDir + && obj->variantType == YAFFS_OBJECT_TYPE_FILE); + + deleteOp = (newDir == obj->myDev->deletedDir); + + existingTarget = yaffs_FindObjectByName(newDir, newName); + + /* If the object is a file going into the unlinked directory, + * then it is OK to just stuff it in since duplicate names are allowed. + * else only proceed if the new name does not exist and if we're putting + * it into a directory. + */ + if ((unlinkOp || + deleteOp || + force || + (shadows > 0) || + !existingTarget) && + newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) { + yaffs_SetObjectName(obj, newName); + obj->dirty = 1; + + yaffs_AddObjectToDirectory(newDir, obj); + + if (unlinkOp) + obj->unlinked = 1; + + /* If it is a deletion then we mark it as a shrink for gc purposes. */ + if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0) + return YAFFS_OK; + } + + return YAFFS_FAIL; +} + +int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName, + yaffs_Object *newDir, const YCHAR *newName) +{ + yaffs_Object *obj = NULL; + yaffs_Object *existingTarget = NULL; + int force = 0; + int result; + yaffs_Device *dev; + + + if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) + YBUG(); + if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) + YBUG(); + + dev = oldDir->myDev; + +#ifdef CONFIG_YAFFS_CASE_INSENSITIVE + /* Special case for case insemsitive systems (eg. WinCE). + * While look-up is case insensitive, the name isn't. 
+ * Therefore we might want to change x.txt to X.txt + */ + if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) + force = 1; +#endif + + if(yaffs_strnlen(newName,YAFFS_MAX_NAME_LENGTH+1) > YAFFS_MAX_NAME_LENGTH) + /* ENAMETOOLONG */ + return YAFFS_FAIL; + + obj = yaffs_FindObjectByName(oldDir, oldName); + + if (obj && obj->renameAllowed) { + + /* Now do the handling for an existing target, if there is one */ + + existingTarget = yaffs_FindObjectByName(newDir, newName); + if (existingTarget && + existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY && + !ylist_empty(&existingTarget->variant.directoryVariant.children)) { + /* There is a target that is a non-empty directory, so we fail */ + return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */ + } else if (existingTarget && existingTarget != obj) { + /* Nuke the target first, using shadowing, + * but only if it isn't the same object. + * + * Note we must disable gc otherwise it can mess up the shadowing. + * + */ + dev->gcDisable=1; + yaffs_ChangeObjectName(obj, newDir, newName, force, + existingTarget->objectId); + existingTarget->isShadowed = 1; + yaffs_UnlinkObject(existingTarget); + dev->gcDisable=0; + } + + result = yaffs_ChangeObjectName(obj, newDir, newName, 1, 0); + + yaffs_UpdateParent(oldDir); + if(newDir != oldDir) + yaffs_UpdateParent(newDir); + + return result; + } + return YAFFS_FAIL; +} + +/*------------------------- Block Management and Page Allocation ----------------*/ + +static int yaffs_InitialiseBlocks(yaffs_Device *dev) +{ + int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1; + + dev->blockInfo = NULL; + dev->chunkBits = NULL; + + dev->allocationBlock = -1; /* force it to get a new one */ + + /* If the first allocation strategy fails, thry the alternate one */ + dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo)); + if (!dev->blockInfo) { + dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo)); + dev->blockInfoAlt = 1; + } else + dev->blockInfoAlt = 0; + + if (dev->blockInfo) { + /* Set up dynamic blockinfo stuff. */ + dev->chunkBitmapStride = (dev->param.nChunksPerBlock + 7) / 8; /* round up bytes */ + dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks); + if (!dev->chunkBits) { + dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks); + dev->chunkBitsAlt = 1; + } else + dev->chunkBitsAlt = 0; + } + + if (dev->blockInfo && dev->chunkBits) { + memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo)); + memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks); + return YAFFS_OK; + } + + return YAFFS_FAIL; +} + +static void yaffs_DeinitialiseBlocks(yaffs_Device *dev) +{ + if (dev->blockInfoAlt && dev->blockInfo) + YFREE_ALT(dev->blockInfo); + else if (dev->blockInfo) + YFREE(dev->blockInfo); + + dev->blockInfoAlt = 0; + + dev->blockInfo = NULL; + + if (dev->chunkBitsAlt && dev->chunkBits) + YFREE_ALT(dev->chunkBits); + else if (dev->chunkBits) + YFREE(dev->chunkBits); + dev->chunkBitsAlt = 0; + dev->chunkBits = NULL; +} + +static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev, + yaffs_BlockInfo *bi) +{ + + if (!dev->param.isYaffs2) + return 1; /* disqualification only applies to yaffs2. */ + + if (!bi->hasShrinkHeader) + return 1; /* can gc */ + + yaffs_FindOldestDirtySequence(dev); + + /* Can't do gc of this block if there are any blocks older than this one that have + * discarded pages. 
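+	 * (The test below permits gc only while this block's sequence number
+	 * does not exceed dev->oldestDirtySequence.)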
+ */ + return (bi->sequenceNumber <= dev->oldestDirtySequence); +} + +/* + * yaffs_FindRefreshBlock() + * periodically finds the oldest full block by sequence number for refreshing. + * Only for yaffs2. + */ +static __u32 yaffs_FindRefreshBlock(yaffs_Device *dev) +{ + __u32 b ; + + __u32 oldest = 0; + __u32 oldestSequence = 0; + + yaffs_BlockInfo *bi; + + /* + * If refresh period < 10 then refreshing is disabled. + */ + if(dev->param.refreshPeriod < 10 || + !dev->param.isYaffs2) + return oldest; + + /* + * Fix broken values. + */ + if(dev->refreshSkip > dev->param.refreshPeriod) + dev->refreshSkip = dev->param.refreshPeriod; + + if(dev->refreshSkip > 0) + return oldest; + + /* + * Refresh skip is now zero. + * We'll do a refresh this time around.... + * Update the refresh skip and find the oldest block. + */ + dev->refreshSkip = dev->param.refreshPeriod; + dev->refreshCount++; + bi = dev->blockInfo; + for (b = dev->internalStartBlock; b <=dev->internalEndBlock; b++){ + + if (bi->blockState == YAFFS_BLOCK_STATE_FULL){ + + if(oldest < 1 || + bi->sequenceNumber < oldestSequence){ + oldest = b; + oldestSequence = bi->sequenceNumber; + } + } + bi++; + } + + if (oldest > 0) { + T(YAFFS_TRACE_GC, + (TSTR("GC refresh count %d selected block %d with sequenceNumber %d" TENDSTR), + dev->refreshCount, oldest, oldestSequence)); + } + + return oldest; +} + + +static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo) +{ + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo); + + int erasedOk = 0; + + /* If the block is still healthy erase it and mark as clean. + * If the block has had a data failure, then retire it. + */ + + T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, + (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR), + blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : "")); + + yaffs_ClearOldestDirtySequence(dev,bi); + + bi->blockState = YAFFS_BLOCK_STATE_DIRTY; + + /* If this is the block being garbage collected then stop gc'ing this block */ + if(blockNo == dev->gcBlock) + dev->gcBlock = 0; + + /* If this block is currently the best candidate for gc then drop as a candidate */ + if(blockNo == dev->gcDirtiest){ + dev->gcDirtiest = 0; + dev->gcPagesInUse = 0; + } + + if (!bi->needsRetiring) { + yaffs_InvalidateCheckpoint(dev); + erasedOk = yaffs_EraseBlockInNAND(dev, blockNo); + if (!erasedOk) { + dev->nErasureFailures++; + T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, + (TSTR("**>> Erasure failed %d" TENDSTR), blockNo)); + } + } + + if (erasedOk && + ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) { + int i; + for (i = 0; i < dev->param.nChunksPerBlock; i++) { + if (!yaffs_CheckChunkErased + (dev, blockNo * dev->param.nChunksPerBlock + i)) { + T(YAFFS_TRACE_ERROR, + (TSTR + (">>Block %d erasure supposedly OK, but chunk %d not erased" + TENDSTR), blockNo, i)); + } + } + } + + if (erasedOk) { + /* Clean it up... 
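+		 * reset the block info fields and the chunk-use bits so the
+		 * block goes back into the erased pool.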
*/ + bi->blockState = YAFFS_BLOCK_STATE_EMPTY; + bi->sequenceNumber = 0; + dev->nErasedBlocks++; + bi->pagesInUse = 0; + bi->softDeletions = 0; + bi->hasShrinkHeader = 0; + bi->skipErasedCheck = 1; /* This is clean, so no need to check */ + bi->gcPrioritise = 0; + yaffs_ClearChunkBits(dev, blockNo); + + T(YAFFS_TRACE_ERASE, + (TSTR("Erased block %d" TENDSTR), blockNo)); + } else { + dev->nFreeChunks -= dev->param.nChunksPerBlock; /* We lost a block of free space */ + + yaffs_RetireBlock(dev, blockNo); + T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, + (TSTR("**>> Block %d retired" TENDSTR), blockNo)); + } +} + +static int yaffs_FindBlockForAllocation(yaffs_Device *dev) +{ + int i; + + yaffs_BlockInfo *bi; + + if (dev->nErasedBlocks < 1) { + /* Hoosterman we've got a problem. + * Can't get space to gc + */ + T(YAFFS_TRACE_ERROR, + (TSTR("yaffs tragedy: no more erased blocks" TENDSTR))); + + return -1; + } + + /* Find an empty block. */ + + for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) { + dev->allocationBlockFinder++; + if (dev->allocationBlockFinder < dev->internalStartBlock + || dev->allocationBlockFinder > dev->internalEndBlock) { + dev->allocationBlockFinder = dev->internalStartBlock; + } + + bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder); + + if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) { + bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING; + dev->sequenceNumber++; + bi->sequenceNumber = dev->sequenceNumber; + dev->nErasedBlocks--; + T(YAFFS_TRACE_ALLOCATE, + (TSTR("Allocated block %d, seq %d, %d left" TENDSTR), + dev->allocationBlockFinder, dev->sequenceNumber, + dev->nErasedBlocks)); + return dev->allocationBlockFinder; + } + } + + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("yaffs tragedy: no more erased blocks, but there should have been %d" + TENDSTR), dev->nErasedBlocks)); + + return -1; +} + + + +static int yaffs_CheckpointRequired(yaffs_Device *dev) +{ + int nblocks = dev->internalEndBlock - dev->internalStartBlock + 1 ; + return dev->param.isYaffs2 && + !dev->param.skipCheckpointWrite && + (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS); +} +static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev) +{ + if (!dev->nCheckpointBlocksRequired && + yaffs_CheckpointRequired(dev)){ + /* Not a valid value so recalculate */ + int nBytes = 0; + int nBlocks; + int devBlocks = (dev->param.endBlock - dev->param.startBlock + 1); + int tnodeSize = yaffs_CalcTnodeSize(dev); + + nBytes += sizeof(yaffs_CheckpointValidity); + nBytes += sizeof(yaffs_CheckpointDevice); + nBytes += devBlocks * sizeof(yaffs_BlockInfo); + nBytes += devBlocks * dev->chunkBitmapStride; + nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects); + nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes); + nBytes += sizeof(yaffs_CheckpointValidity); + nBytes += sizeof(__u32); /* checksum*/ + + /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */ + + nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->param.nChunksPerBlock)) + 3; + + dev->nCheckpointBlocksRequired = nBlocks; + } + + return dev->nCheckpointBlocksRequired; +} + +/* + * Check if there's space to allocate... + * Thinks.... do we need top make this ths same as yaffs_GetFreeChunks()? 
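+ * (Allocation is allowed only while nFreeChunks exceeds the chunks
+ * reserved for nReservedBlocks plus any checkpoint blocks still to be
+ * written, plus the request itself.)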
+ */ +static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev, int nChunks) +{ + int reservedChunks; + int reservedBlocks = dev->param.nReservedBlocks; + int checkpointBlocks; + + if (dev->param.isYaffs2) { + checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) - + dev->blocksInCheckpoint; + if (checkpointBlocks < 0) + checkpointBlocks = 0; + } else { + checkpointBlocks = 0; + } + + reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->param.nChunksPerBlock); + + return (dev->nFreeChunks > (reservedChunks + nChunks)); +} + +static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve, + yaffs_BlockInfo **blockUsedPtr) +{ + int retVal; + yaffs_BlockInfo *bi; + + if (dev->allocationBlock < 0) { + /* Get next block to allocate off */ + dev->allocationBlock = yaffs_FindBlockForAllocation(dev); + dev->allocationPage = 0; + } + + if (!useReserve && !yaffs_CheckSpaceForAllocation(dev, 1)) { + /* Not enough space to allocate unless we're allowed to use the reserve. */ + return -1; + } + + if (dev->nErasedBlocks < dev->param.nReservedBlocks + && dev->allocationPage == 0) { + T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR))); + } + + /* Next page please.... */ + if (dev->allocationBlock >= 0) { + bi = yaffs_GetBlockInfo(dev, dev->allocationBlock); + + retVal = (dev->allocationBlock * dev->param.nChunksPerBlock) + + dev->allocationPage; + bi->pagesInUse++; + yaffs_SetChunkBit(dev, dev->allocationBlock, + dev->allocationPage); + + dev->allocationPage++; + + dev->nFreeChunks--; + + /* If the block is full set the state to full */ + if (dev->allocationPage >= dev->param.nChunksPerBlock) { + bi->blockState = YAFFS_BLOCK_STATE_FULL; + dev->allocationBlock = -1; + } + + if (blockUsedPtr) + *blockUsedPtr = bi; + + return retVal; + } + + T(YAFFS_TRACE_ERROR, + (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR))); + + return -1; +} + +static int yaffs_GetErasedChunks(yaffs_Device *dev) +{ + int n; + + n = dev->nErasedBlocks * dev->param.nChunksPerBlock; + + if (dev->allocationBlock > 0) + n += (dev->param.nChunksPerBlock - dev->allocationPage); + + return n; + +} + +/* + * yaffs_SkipRestOfBlock() skips over the rest of the allocation block + * if we don't want to write to it. 
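+ * It simply marks the current allocation block as full so that the next
+ * allocation moves on to a fresh block.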
+ */ +static void yaffs_SkipRestOfBlock(yaffs_Device *dev) +{ + if(dev->allocationBlock > 0){ + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->allocationBlock); + if(bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING){ + bi->blockState = YAFFS_BLOCK_STATE_FULL; + dev->allocationBlock = -1; + } + } +} + + +static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block, + int wholeBlock) +{ + int oldChunk; + int newChunk; + int markNAND; + int retVal = YAFFS_OK; + int cleanups = 0; + int i; + int isCheckpointBlock; + int matchingChunk; + int maxCopies; + + int chunksBefore = yaffs_GetErasedChunks(dev); + int chunksAfter; + + yaffs_ExtendedTags tags; + + yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block); + + yaffs_Object *object; + + isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT); + + + T(YAFFS_TRACE_TRACING, + (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR), + block, + bi->pagesInUse, + bi->hasShrinkHeader, + wholeBlock)); + + /*yaffs_VerifyFreeChunks(dev); */ + + if(bi->blockState == YAFFS_BLOCK_STATE_FULL) + bi->blockState = YAFFS_BLOCK_STATE_COLLECTING; + + bi->hasShrinkHeader = 0; /* clear the flag so that the block can erase */ + + dev->gcDisable = 1; + + if (isCheckpointBlock || + !yaffs_StillSomeChunkBits(dev, block)) { + T(YAFFS_TRACE_TRACING, + (TSTR + ("Collecting block %d that has no chunks in use" TENDSTR), + block)); + yaffs_BlockBecameDirty(dev, block); + } else { + + __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__); + + yaffs_VerifyBlock(dev, bi, block); + + maxCopies = (wholeBlock) ? dev->param.nChunksPerBlock : 5; + oldChunk = block * dev->param.nChunksPerBlock + dev->gcChunk; + + for (/* init already done */; + retVal == YAFFS_OK && + dev->gcChunk < dev->param.nChunksPerBlock && + (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) && + maxCopies > 0; + dev->gcChunk++, oldChunk++) { + if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) { + + /* This page is in use and might need to be copied off */ + + maxCopies--; + + markNAND = 1; + + yaffs_InitialiseTags(&tags); + + yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk, + buffer, &tags); + + object = + yaffs_FindObjectByNumber(dev, + tags.objectId); + + T(YAFFS_TRACE_GC_DETAIL, + (TSTR + ("Collecting chunk in block %d, %d %d %d " TENDSTR), + dev->gcChunk, tags.objectId, tags.chunkId, + tags.byteCount)); + + if (object && !yaffs_SkipVerification(dev)) { + if (tags.chunkId == 0) + matchingChunk = object->hdrChunk; + else if (object->softDeleted) + matchingChunk = oldChunk; /* Defeat the test */ + else + matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL); + + if (oldChunk != matchingChunk) + T(YAFFS_TRACE_ERROR, + (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR), + oldChunk, matchingChunk, tags.objectId, tags.chunkId)); + + } + + if (!object) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("page %d in gc has no object: %d %d %d " + TENDSTR), oldChunk, + tags.objectId, tags.chunkId, tags.byteCount)); + } + + if (object && + object->deleted && + object->softDeleted && + tags.chunkId != 0) { + /* Data chunk in a soft deleted file, throw it away + * It's a soft deleted data chunk, + * No need to copy this, just forget about it and + * fix up the object. + */ + + /* Free chunks already includes softdeleted chunks. + * How ever this chunk is going to soon be really deleted + * which will increment free chunks. + * We have to decrement free chunks so this works out properly. 
+ */ + dev->nFreeChunks--; + + object->nDataChunks--; + + if (object->nDataChunks <= 0) { + /* remeber to clean up the object */ + dev->gcCleanupList[cleanups] = + tags.objectId; + cleanups++; + } + markNAND = 0; + } else if (0) { + /* Todo object && object->deleted && object->nDataChunks == 0 */ + /* Deleted object header with no data chunks. + * Can be discarded and the file deleted. + */ + object->hdrChunk = 0; + yaffs_FreeTnode(object->myDev, + object->variant. + fileVariant.top); + object->variant.fileVariant.top = NULL; + yaffs_DoGenericObjectDeletion(object); + + } else if (object) { + /* It's either a data chunk in a live file or + * an ObjectHeader, so we're interested in it. + * NB Need to keep the ObjectHeaders of deleted files + * until the whole file has been deleted off + */ + tags.serialNumber++; + + dev->nGCCopies++; + + if (tags.chunkId == 0) { + /* It is an object Id, + * We need to nuke the shrinkheader flags first + * Also need to clean up shadowing. + * We no longer want the shrinkHeader flag since its work is done + * and if it is left in place it will mess up scanning. + */ + + yaffs_ObjectHeader *oh; + oh = (yaffs_ObjectHeader *)buffer; + + oh->isShrink = 0; + tags.extraIsShrinkHeader = 0; + + oh->shadowsObject = 0; + oh->inbandShadowsObject = 0; + tags.extraShadows = 0; + + /* Update file size */ + if(object->variantType == YAFFS_OBJECT_TYPE_FILE){ + oh->fileSize = object->variant.fileVariant.fileSize; + tags.extraFileLength = oh->fileSize; + } + + yaffs_VerifyObjectHeader(object, oh, &tags, 1); + newChunk = + yaffs_WriteNewChunkWithTagsToNAND(dev,(__u8 *) oh, &tags, 1); + } else + newChunk = + yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1); + + if (newChunk < 0) { + retVal = YAFFS_FAIL; + } else { + + /* Ok, now fix up the Tnodes etc. */ + + if (tags.chunkId == 0) { + /* It's a header */ + object->hdrChunk = newChunk; + object->serial = tags.serialNumber; + } else { + /* It's a data chunk */ + int ok; + ok = yaffs_PutChunkIntoFile + (object, + tags.chunkId, + newChunk, 0); + } + } + } + + if (retVal == YAFFS_OK) + yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__); + + } + } + + yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); + + + /* Do any required cleanups */ + for (i = 0; i < cleanups; i++) { + /* Time to delete the file too */ + object = + yaffs_FindObjectByNumber(dev, + dev->gcCleanupList[i]); + if (object) { + yaffs_FreeTnode(dev, + object->variant.fileVariant. + top); + object->variant.fileVariant.top = NULL; + T(YAFFS_TRACE_GC, + (TSTR + ("yaffs: About to finally delete object %d" + TENDSTR), object->objectId)); + yaffs_DoGenericObjectDeletion(object); + object->myDev->nDeletedFiles--; + } + + } + + } + + yaffs_VerifyCollectedBlock(dev, bi, block); + + + + if (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) { + /* + * The gc did not complete. Set block state back to FULL + * because checkpointing does not restore gc. + */ + bi->blockState = YAFFS_BLOCK_STATE_FULL; + } else { + /* The gc completed. */ + chunksAfter = yaffs_GetErasedChunks(dev); + if (chunksBefore >= chunksAfter) { + T(YAFFS_TRACE_GC, + (TSTR + ("gc did not increase free chunks before %d after %d" + TENDSTR), chunksBefore, chunksAfter)); + } + dev->gcBlock = 0; + dev->gcChunk = 0; + } + + dev->gcDisable = 0; + + return retVal; +} + +/* + * FindBlockForgarbageCollection is used to select the dirtiest block (or close enough) + * for garbage collection. 
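+ * Selection order, as implemented below: prioritised blocks first (when
+ * not collecting aggressively), then the dirtiest FULL block found within
+ * a bounded scan, and finally the oldest dirty block if gc has failed to
+ * select anything for a while.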
+ */ + +static unsigned yaffs_FindBlockForGarbageCollection(yaffs_Device *dev, + int aggressive, + int background) +{ + int i; + int iterations; + unsigned selected = 0; + int prioritised = 0; + int prioritisedExists = 0; + yaffs_BlockInfo *bi; + int threshold; + + /* First let's see if we need to grab a prioritised block */ + if (dev->hasPendingPrioritisedGCs && !aggressive) { + dev->gcDirtiest = 0; + bi = dev->blockInfo; + for (i = dev->internalStartBlock; + i <= dev->internalEndBlock && !selected; + i++) { + + if (bi->gcPrioritise) { + prioritisedExists = 1; + if (bi->blockState == YAFFS_BLOCK_STATE_FULL && + yaffs_BlockNotDisqualifiedFromGC(dev, bi)) { + selected = i; + prioritised = 1; + } + } + bi++; + } + + /* + * If there is a prioritised block and none was selected then + * this happened because there is at least one old dirty block gumming + * up the works. Let's gc the oldest dirty block. + */ + + if(prioritisedExists && + !selected && + dev->oldestDirtyBlock > 0) + selected = dev->oldestDirtyBlock; + + if (!prioritisedExists) /* None found, so we can clear this */ + dev->hasPendingPrioritisedGCs = 0; + } + + /* If we're doing aggressive GC then we are happy to take a less-dirty block, and + * search harder. + * else (we're doing a leasurely gc), then we only bother to do this if the + * block has only a few pages in use. + */ + + if (!selected){ + int pagesUsed; + int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1; + if (aggressive){ + threshold = dev->param.nChunksPerBlock; + iterations = nBlocks; + } else { + int maxThreshold = dev->param.nChunksPerBlock/2; + threshold = background ? + (dev->gcNotDone + 2) * 2 : 0; + if(threshold <YAFFS_GC_PASSIVE_THRESHOLD) + threshold = YAFFS_GC_PASSIVE_THRESHOLD; + if(threshold > maxThreshold) + threshold = maxThreshold; + + iterations = nBlocks / 16 + 1; + if (iterations > 100) + iterations = 100; + } + + for (i = 0; + i < iterations && + (dev->gcDirtiest < 1 || + dev->gcPagesInUse > YAFFS_GC_GOOD_ENOUGH); + i++) { + dev->gcBlockFinder++; + if (dev->gcBlockFinder < dev->internalStartBlock || + dev->gcBlockFinder > dev->internalEndBlock) + dev->gcBlockFinder = dev->internalStartBlock; + + bi = yaffs_GetBlockInfo(dev, dev->gcBlockFinder); + + pagesUsed = bi->pagesInUse - bi->softDeletions; + + if (bi->blockState == YAFFS_BLOCK_STATE_FULL && + pagesUsed < dev->param.nChunksPerBlock && + (dev->gcDirtiest < 1 || pagesUsed < dev->gcPagesInUse) && + yaffs_BlockNotDisqualifiedFromGC(dev, bi)) { + dev->gcDirtiest = dev->gcBlockFinder; + dev->gcPagesInUse = pagesUsed; + } + } + + if(dev->gcDirtiest > 0 && dev->gcPagesInUse <= threshold) + selected = dev->gcDirtiest; + } + + /* + * If nothing has been selected for a while, try selecting the oldest dirty + * because that's gumming up the works. + */ + + if(!selected && dev->param.isYaffs2 && + dev->gcNotDone >= ( background ? 
10 : 20)){ + yaffs_FindOldestDirtySequence(dev); + if(dev->oldestDirtyBlock > 0) { + selected = dev->oldestDirtyBlock; + dev->gcDirtiest = selected; + dev->oldestDirtyGCs++; + bi = yaffs_GetBlockInfo(dev, selected); + dev->gcPagesInUse = bi->pagesInUse - bi->softDeletions; + } else + dev->gcNotDone = 0; + } + + if(selected){ + T(YAFFS_TRACE_GC, + (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), + selected, + dev->param.nChunksPerBlock - dev->gcPagesInUse, + prioritised)); + + if(background) + dev->backgroundGCs++; + dev->gcDirtiest = 0; + dev->gcPagesInUse = 0; + dev->gcNotDone = 0; + if(dev->refreshSkip > 0) + dev->refreshSkip--; + } else{ + dev->gcNotDone++; + T(YAFFS_TRACE_GC, + (TSTR("GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s" TENDSTR), + dev->gcBlockFinder, dev->gcNotDone, + threshold, + dev->gcDirtiest, dev->gcPagesInUse, + dev->oldestDirtyBlock, + background ? " bg" : "")); + } + + return selected; +} + +/* New garbage collector + * If we're very low on erased blocks then we do aggressive garbage collection + * otherwise we do "leasurely" garbage collection. + * Aggressive gc looks further (whole array) and will accept less dirty blocks. + * Passive gc only inspects smaller areas and will only accept more dirty blocks. + * + * The idea is to help clear out space in a more spread-out manner. + * Dunno if it really does anything useful. + */ +static int yaffs_CheckGarbageCollection(yaffs_Device *dev, int background) +{ + int aggressive = 0; + int gcOk = YAFFS_OK; + int maxTries = 0; + + int minErased; + int erasedChunks; + + int checkpointBlockAdjust; + + if(dev->param.gcControl && + (dev->param.gcControl(dev) & 1) == 0) + return YAFFS_OK; + + if (dev->gcDisable) { + /* Bail out so we don't get recursive gc */ + return YAFFS_OK; + } + + /* This loop should pass the first time. + * We'll only see looping here if the collection does not increase space. + */ + + do { + maxTries++; + + checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint; + if (checkpointBlockAdjust < 0) + checkpointBlockAdjust = 0; + + minErased = dev->param.nReservedBlocks + checkpointBlockAdjust + 1; + erasedChunks = dev->nErasedBlocks * dev->param.nChunksPerBlock; + + /* If we need a block soon then do aggressive gc.*/ + if (dev->nErasedBlocks < minErased) + aggressive = 1; + else { + if(dev->gcSkip > 20) + dev->gcSkip = 20; + if(erasedChunks < dev->nFreeChunks/2 || + dev->gcSkip < 1 || + background) + aggressive = 0; + else { + dev->gcSkip--; + break; + } + } + + dev->gcSkip = 5; + + /* If we don't already have a block being gc'd then see if we should start another */ + + if (dev->gcBlock < 1 && !aggressive) { + dev->gcBlock = yaffs_FindRefreshBlock(dev); + dev->gcChunk = 0; + } + if (dev->gcBlock < 1) { + dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive, background); + dev->gcChunk = 0; + } + + if (dev->gcBlock > 0) { + dev->allGCs++; + if (!aggressive) + dev->passiveGCs++; + + T(YAFFS_TRACE_GC, + (TSTR + ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR), + dev->nErasedBlocks, aggressive)); + + gcOk = yaffs_GarbageCollectBlock(dev, dev->gcBlock, aggressive); + } + + if (dev->nErasedBlocks < (dev->param.nReservedBlocks) && dev->gcBlock > 0) { + T(YAFFS_TRACE_GC, + (TSTR + ("yaffs: GC !!!no reclaim!!! 
erasedBlocks %d after try %d block %d" + TENDSTR), dev->nErasedBlocks, maxTries, dev->gcBlock)); + } + } while ((dev->nErasedBlocks < dev->param.nReservedBlocks) && + (dev->gcBlock > 0) && + (maxTries < 2)); + + return aggressive ? gcOk : YAFFS_OK; +} + +/* + * yaffs_BackgroundGarbageCollect() + * Garbage collects. Intended to be called from a background thread. + * Returns non-zero if at least half the free chunks are erased. + */ +int yaffs_BackgroundGarbageCollect(yaffs_Device *dev, unsigned urgency) +{ + int erasedChunks = dev->nErasedBlocks * dev->param.nChunksPerBlock; + + T(YAFFS_TRACE_BACKGROUND, (TSTR("Background gc %u" TENDSTR),urgency)); + + yaffs_CheckGarbageCollection(dev, 1); + return erasedChunks > dev->nFreeChunks/2; +} + +/*------------------------- TAGS --------------------------------*/ + +static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId, + int chunkInObject) +{ + return (tags->chunkId == chunkInObject && + tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0; + +} + + +/*-------------------- Data file manipulation -----------------*/ + +static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode, + yaffs_ExtendedTags *tags) +{ + /*Get the Tnode, then get the level 0 offset chunk offset */ + yaffs_Tnode *tn; + int theChunk = -1; + yaffs_ExtendedTags localTags; + int retVal = -1; + + yaffs_Device *dev = in->myDev; + + if (!tags) { + /* Passed a NULL, so use our own tags space */ + tags = &localTags; + } + + tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode); + + if (tn) { + theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode); + + retVal = + yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId, + chunkInInode); + } + return retVal; +} + +static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode, + yaffs_ExtendedTags *tags) +{ + /* Get the Tnode, then get the level 0 offset chunk offset */ + yaffs_Tnode *tn; + int theChunk = -1; + yaffs_ExtendedTags localTags; + + yaffs_Device *dev = in->myDev; + int retVal = -1; + + if (!tags) { + /* Passed a NULL, so use our own tags space */ + tags = &localTags; + } + + tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode); + + if (tn) { + + theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode); + + retVal = + yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId, + chunkInInode); + + /* Delete the entry in the filestructure (if found) */ + if (retVal != -1) + yaffs_LoadLevel0Tnode(dev, tn, chunkInInode, 0); + } + + return retVal; +} + +#ifdef YAFFS_PARANOID + +static int yaffs_CheckFileSanity(yaffs_Object *in) +{ + int chunk; + int nChunks; + int fSize; + int failed = 0; + int objId; + yaffs_Tnode *tn; + yaffs_Tags localTags; + yaffs_Tags *tags = &localTags; + int theChunk; + int chunkDeleted; + + if (in->variantType != YAFFS_OBJECT_TYPE_FILE) + return YAFFS_FAIL; + + objId = in->objectId; + fSize = in->variant.fileVariant.fileSize; + nChunks = + (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk; + + for (chunk = 1; chunk <= nChunks; chunk++) { + tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant, + chunk); + + if (tn) { + + theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk); + + if (yaffs_CheckChunkBits + (dev, theChunk / dev->param.nChunksPerBlock, + theChunk % dev->param.nChunksPerBlock)) { + + yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk, + tags, + &chunkDeleted); + if (yaffs_TagsMatch + (tags, in->objectId, chunk, chunkDeleted)) { + /* found it; */ + + } + } else { + + failed = 1; 
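+ /* The chunk bit is clear: the tnode refers to a chunk
+ * that is not marked as in use, so the file structure
+ * is inconsistent.
+ */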
+ } + + } else { + /* T(("No level 0 found for %d\n", chunk)); */ + } + } + + return failed ? YAFFS_FAIL : YAFFS_OK; +} + +#endif + +static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode, + int chunkInNAND, int inScan) +{ + /* NB inScan is zero unless scanning. + * For forward scanning, inScan is > 0; + * for backward scanning inScan is < 0 + * + * chunkInNAND = 0 is a dummy insert to make sure the tnodes are there. + */ + + yaffs_Tnode *tn; + yaffs_Device *dev = in->myDev; + int existingChunk; + yaffs_ExtendedTags existingTags; + yaffs_ExtendedTags newTags; + unsigned existingSerial, newSerial; + + if (in->variantType != YAFFS_OBJECT_TYPE_FILE) { + /* Just ignore an attempt at putting a chunk into a non-file during scanning + * If it is not during Scanning then something went wrong! + */ + if (!inScan) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs tragedy:attempt to put data chunk into a non-file" + TENDSTR))); + YBUG(); + } + + yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__); + return YAFFS_OK; + } + + tn = yaffs_AddOrFindLevel0Tnode(dev, + &in->variant.fileVariant, + chunkInInode, + NULL); + if (!tn) + return YAFFS_FAIL; + + if(!chunkInNAND) + /* Dummy insert, bail now */ + return YAFFS_OK; + + existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode); + + if (inScan != 0) { + /* If we're scanning then we need to test for duplicates + * NB This does not need to be efficient since it should only ever + * happen when the power fails during a write, then only one + * chunk should ever be affected. + * + * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO + * Update: For backward scanning we don't need to re-read tags so this is quite cheap. + */ + + if (existingChunk > 0) { + /* NB Right now existing chunk will not be real chunkId if the device >= 32MB + * thus we have to do a FindChunkInFile to get the real chunk id. + * + * We have a duplicate now we need to decide which one to use: + * + * Backwards scanning YAFFS2: The old one is what we use, dump the new one. + * Forward scanning YAFFS2: The new one is what we use, dump the old one. + * YAFFS1: Get both sets of tags and compare serial numbers. + */ + + if (inScan > 0) { + /* Only do this for forward scanning */ + yaffs_ReadChunkWithTagsFromNAND(dev, + chunkInNAND, + NULL, &newTags); + + /* Do a proper find */ + existingChunk = + yaffs_FindChunkInFile(in, chunkInInode, + &existingTags); + } + + if (existingChunk <= 0) { + /*Hoosterman - how did this happen? */ + + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs tragedy: existing chunk < 0 in scan" + TENDSTR))); + + } + + /* NB The deleted flags should be false, otherwise the chunks will + * not be loaded during a scan + */ + + if (inScan > 0) { + newSerial = newTags.serialNumber; + existingSerial = existingTags.serialNumber; + } + + if ((inScan > 0) && + (in->myDev->param.isYaffs2 || + existingChunk <= 0 || + ((existingSerial + 1) & 3) == newSerial)) { + /* Forward scanning. + * Use new + * Delete the old one and drop through to update the tnode + */ + yaffs_DeleteChunk(dev, existingChunk, 1, + __LINE__); + } else { + /* Backward scanning or we want to use the existing one + * Use existing. 
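+ * (For YAFFS1 forward scanning we land here when the serial numbers show
+ * the existing chunk is still current, i.e.
+ * ((existingSerial + 1) & 3) != newSerial: for example existingSerial 2
+ * with newSerial 2 keeps the existing chunk, whereas newSerial 3 would
+ * have replaced it.)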
+ * Delete the new one and return early so that the tnode isn't changed + */ + yaffs_DeleteChunk(dev, chunkInNAND, 1, + __LINE__); + return YAFFS_OK; + } + } + + } + + if (existingChunk == 0) + in->nDataChunks++; + + yaffs_LoadLevel0Tnode(dev, tn, chunkInInode, chunkInNAND); + + return YAFFS_OK; +} + +static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode, + __u8 *buffer) +{ + int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL); + + if (chunkInNAND >= 0) + return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND, + buffer, NULL); + else { + T(YAFFS_TRACE_NANDACCESS, + (TSTR("Chunk %d not found zero instead" TENDSTR), + chunkInNAND)); + /* get sane (zero) data if you read a hole */ + memset(buffer, 0, in->myDev->nDataBytesPerChunk); + return 0; + } + +} + +void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn) +{ + int block; + int page; + yaffs_ExtendedTags tags; + yaffs_BlockInfo *bi; + + if (chunkId <= 0) + return; + + dev->nDeletions++; + block = chunkId / dev->param.nChunksPerBlock; + page = chunkId % dev->param.nChunksPerBlock; + + + if (!yaffs_CheckChunkBit(dev, block, page)) + T(YAFFS_TRACE_VERIFY, + (TSTR("Deleting invalid chunk %d"TENDSTR), + chunkId)); + + bi = yaffs_GetBlockInfo(dev, block); + + yaffs_UpdateOldestDirtySequence(dev, block, bi); + + T(YAFFS_TRACE_DELETION, + (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId)); + + if (markNAND && + bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->param.isYaffs2) { + + yaffs_InitialiseTags(&tags); + + tags.chunkDeleted = 1; + + yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags); + yaffs_HandleUpdateChunk(dev, chunkId, &tags); + } else { + dev->nUnmarkedDeletions++; + } + + /* Pull out of the management area. + * If the whole block became dirty, this will kick off an erasure. + */ + if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || + bi->blockState == YAFFS_BLOCK_STATE_FULL || + bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING || + bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) { + dev->nFreeChunks++; + + yaffs_ClearChunkBit(dev, block, page); + + bi->pagesInUse--; + + if (bi->pagesInUse == 0 && + !bi->hasShrinkHeader && + bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING && + bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) { + yaffs_BlockBecameDirty(dev, block); + } + + } + +} + +static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode, + const __u8 *buffer, int nBytes, + int useReserve) +{ + /* Find old chunk Need to do this to get serial number + * Write new one and patch into tree. + * Invalidate old tags. + */ + + int prevChunkId; + yaffs_ExtendedTags prevTags; + + int newChunkId; + yaffs_ExtendedTags newTags; + + yaffs_Device *dev = in->myDev; + + yaffs_CheckGarbageCollection(dev,0); + + /* Get the previous chunk at this location in the file if it exists. + * If it does not exist then put a zero into the tree. This creates + * the tnode now, rather than later when it is harder to clean up. + */ + prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags); + if(prevChunkId < 1 && + !yaffs_PutChunkIntoFile(in, chunkInInode, 0, 0)) + return 0; + + /* Set up new tags */ + yaffs_InitialiseTags(&newTags); + + newTags.chunkId = chunkInInode; + newTags.objectId = in->objectId; + newTags.serialNumber = + (prevChunkId > 0) ? prevTags.serialNumber + 1 : 1; + newTags.byteCount = nBytes; + + if (nBytes < 1 || nBytes > dev->param.totalBytesPerChunk) { + T(YAFFS_TRACE_ERROR, + (TSTR("Writing %d bytes to chunk!!!!!!!!!" 
TENDSTR), nBytes)); + YBUG(); + } + + + newChunkId = + yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags, + useReserve); + + if (newChunkId > 0) { + yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0); + + if (prevChunkId > 0) + yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__); + + yaffs_CheckFileSanity(in); + } + return newChunkId; + +} + +/* UpdateObjectHeader updates the header on NAND for an object. + * If name is not NULL, then that new name is used. + */ +int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force, + int isShrink, int shadows) +{ + + yaffs_BlockInfo *bi; + + yaffs_Device *dev = in->myDev; + + int prevChunkId; + int retVal = 0; + int result = 0; + + int newChunkId; + yaffs_ExtendedTags newTags; + yaffs_ExtendedTags oldTags; + YCHAR *alias = NULL; + + __u8 *buffer = NULL; + YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1]; + + yaffs_ObjectHeader *oh = NULL; + + yaffs_strcpy(oldName, _Y("silly old name")); + + + if (!in->fake || + in == dev->rootDir || /* The rootDir should also be saved */ + force) { + + yaffs_CheckGarbageCollection(dev,0); + yaffs_CheckObjectDetailsLoaded(in); + + buffer = yaffs_GetTempBuffer(in->myDev, __LINE__); + oh = (yaffs_ObjectHeader *) buffer; + + prevChunkId = in->hdrChunk; + + if (prevChunkId > 0) { + result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId, + buffer, &oldTags); + + yaffs_VerifyObjectHeader(in, oh, &oldTags, 0); + + memcpy(oldName, oh->name, sizeof(oh->name)); + } + + memset(buffer, 0xFF, dev->nDataBytesPerChunk); + + oh->type = in->variantType; + oh->yst_mode = in->yst_mode; + oh->shadowsObject = oh->inbandShadowsObject = shadows; + +#ifdef CONFIG_YAFFS_WINCE + oh->win_atime[0] = in->win_atime[0]; + oh->win_ctime[0] = in->win_ctime[0]; + oh->win_mtime[0] = in->win_mtime[0]; + oh->win_atime[1] = in->win_atime[1]; + oh->win_ctime[1] = in->win_ctime[1]; + oh->win_mtime[1] = in->win_mtime[1]; +#else + oh->yst_uid = in->yst_uid; + oh->yst_gid = in->yst_gid; + oh->yst_atime = in->yst_atime; + oh->yst_mtime = in->yst_mtime; + oh->yst_ctime = in->yst_ctime; + oh->yst_rdev = in->yst_rdev; +#endif + if (in->parent) + oh->parentObjectId = in->parent->objectId; + else + oh->parentObjectId = 0; + + if (name && *name) { + memset(oh->name, 0, sizeof(oh->name)); + yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH); + } else if (prevChunkId > 0) + memcpy(oh->name, oldName, sizeof(oh->name)); + else + memset(oh->name, 0, sizeof(oh->name)); + + oh->isShrink = isShrink; + + switch (in->variantType) { + case YAFFS_OBJECT_TYPE_UNKNOWN: + /* Should not happen */ + break; + case YAFFS_OBJECT_TYPE_FILE: + oh->fileSize = + (oh->parentObjectId == YAFFS_OBJECTID_DELETED + || oh->parentObjectId == + YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant. 
+ fileVariant.fileSize; + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + oh->equivalentObjectId = + in->variant.hardLinkVariant.equivalentObjectId; + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + alias = in->variant.symLinkVariant.alias; + if(!alias) + alias = _Y("no alias"); + yaffs_strncpy(oh->alias, + alias, + YAFFS_MAX_ALIAS_LENGTH); + oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0; + break; + } + + /* Tags */ + yaffs_InitialiseTags(&newTags); + in->serial++; + newTags.chunkId = 0; + newTags.objectId = in->objectId; + newTags.serialNumber = in->serial; + + /* Add extra info for file header */ + + newTags.extraHeaderInfoAvailable = 1; + newTags.extraParentObjectId = oh->parentObjectId; + newTags.extraFileLength = oh->fileSize; + newTags.extraIsShrinkHeader = oh->isShrink; + newTags.extraEquivalentObjectId = oh->equivalentObjectId; + newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0; + newTags.extraObjectType = in->variantType; + + yaffs_VerifyObjectHeader(in, oh, &newTags, 1); + + /* Create new chunk in NAND */ + newChunkId = + yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags, + (prevChunkId > 0) ? 1 : 0); + + if (newChunkId >= 0) { + + in->hdrChunk = newChunkId; + + if (prevChunkId > 0) { + yaffs_DeleteChunk(dev, prevChunkId, 1, + __LINE__); + } + + if (!yaffs_ObjectHasCachedWriteData(in)) + in->dirty = 0; + + /* If this was a shrink, then mark the block that the chunk lives on */ + if (isShrink) { + bi = yaffs_GetBlockInfo(in->myDev, + newChunkId / in->myDev->param.nChunksPerBlock); + bi->hasShrinkHeader = 1; + } + + } + + retVal = newChunkId; + + } + + if (buffer) + yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); + + return retVal; +} + +/*------------------------ Short Operations Cache ---------------------------------------- + * In many situations where there is no high level buffering (eg WinCE) a lot of + * reads might be short sequential reads, and a lot of writes may be short + * sequential writes. eg. scanning/writing a jpeg file. + * In these cases, a short read/write cache can provide a huge perfomance benefit + * with dumb-as-a-rock code. + * In Linux, the page cache provides read buffering aand the short op cache provides write + * buffering. + * + * There are a limited number (~10) of cache chunks per device so that we don't + * need a very intelligent search. + */ + +static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj) +{ + yaffs_Device *dev = obj->myDev; + int i; + yaffs_ChunkCache *cache; + int nCaches = obj->myDev->param.nShortOpCaches; + + for (i = 0; i < nCaches; i++) { + cache = &dev->srCache[i]; + if (cache->object == obj && + cache->dirty) + return 1; + } + + return 0; +} + + +static void yaffs_FlushFilesChunkCache(yaffs_Object *obj) +{ + yaffs_Device *dev = obj->myDev; + int lowest = -99; /* Stop compiler whining. */ + int i; + yaffs_ChunkCache *cache; + int chunkWritten = 0; + int nCaches = obj->myDev->param.nShortOpCaches; + + if (nCaches > 0) { + do { + cache = NULL; + + /* Find the dirty cache for this object with the lowest chunk id. 
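+ * Picking the lowest dirty chunkId on every pass of the loop below means
+ * the object is written out in ascending chunk order, one chunk at a time.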
*/ + for (i = 0; i < nCaches; i++) { + if (dev->srCache[i].object == obj && + dev->srCache[i].dirty) { + if (!cache + || dev->srCache[i].chunkId < + lowest) { + cache = &dev->srCache[i]; + lowest = cache->chunkId; + } + } + } + + if (cache && !cache->locked) { + /* Write it out and free it up */ + + chunkWritten = + yaffs_WriteChunkDataToObject(cache->object, + cache->chunkId, + cache->data, + cache->nBytes, + 1); + cache->dirty = 0; + cache->object = NULL; + } + + } while (cache && chunkWritten > 0); + + if (cache) { + /* Hoosterman, disk full while writing cache out. */ + T(YAFFS_TRACE_ERROR, + (TSTR("yaffs tragedy: no space during cache write" TENDSTR))); + + } + } + +} + +/*yaffs_FlushEntireDeviceCache(dev) + * + * + */ + +void yaffs_FlushEntireDeviceCache(yaffs_Device *dev) +{ + yaffs_Object *obj; + int nCaches = dev->param.nShortOpCaches; + int i; + + /* Find a dirty object in the cache and flush it... + * until there are no further dirty objects. + */ + do { + obj = NULL; + for (i = 0; i < nCaches && !obj; i++) { + if (dev->srCache[i].object && + dev->srCache[i].dirty) + obj = dev->srCache[i].object; + + } + if (obj) + yaffs_FlushFilesChunkCache(obj); + + } while (obj); + +} + + +/* Grab us a cache chunk for use. + * First look for an empty one. + * Then look for the least recently used non-dirty one. + * Then look for the least recently used dirty one...., flush and look again. + */ +static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev) +{ + int i; + + if (dev->param.nShortOpCaches > 0) { + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (!dev->srCache[i].object) + return &dev->srCache[i]; + } + } + + return NULL; +} + +static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev) +{ + yaffs_ChunkCache *cache; + yaffs_Object *theObj; + int usage; + int i; + int pushout; + + if (dev->param.nShortOpCaches > 0) { + /* Try find a non-dirty one... */ + + cache = yaffs_GrabChunkCacheWorker(dev); + + if (!cache) { + /* They were all dirty, find the last recently used object and flush + * its cache, then find again. + * NB what's here is not very accurate, we actually flush the object + * the last recently used page. 
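+ * i.e. we flush every dirty cache entry belonging to the object that owns
+ * the least recently used page, then try the grab again.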
+ */ + + /* With locking we can't assume we can use entry zero */ + + theObj = NULL; + usage = -1; + cache = NULL; + pushout = -1; + + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].object && + !dev->srCache[i].locked && + (dev->srCache[i].lastUse < usage || !cache)) { + usage = dev->srCache[i].lastUse; + theObj = dev->srCache[i].object; + cache = &dev->srCache[i]; + pushout = i; + } + } + + if (!cache || cache->dirty) { + /* Flush and try again */ + yaffs_FlushFilesChunkCache(theObj); + cache = yaffs_GrabChunkCacheWorker(dev); + } + + } + return cache; + } else + return NULL; + +} + +/* Find a cached chunk */ +static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj, + int chunkId) +{ + yaffs_Device *dev = obj->myDev; + int i; + if (dev->param.nShortOpCaches > 0) { + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].object == obj && + dev->srCache[i].chunkId == chunkId) { + dev->cacheHits++; + + return &dev->srCache[i]; + } + } + } + return NULL; +} + +/* Mark the chunk for the least recently used algorithym */ +static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache, + int isAWrite) +{ + + if (dev->param.nShortOpCaches > 0) { + if (dev->srLastUse < 0 || dev->srLastUse > 100000000) { + /* Reset the cache usages */ + int i; + for (i = 1; i < dev->param.nShortOpCaches; i++) + dev->srCache[i].lastUse = 0; + + dev->srLastUse = 0; + } + + dev->srLastUse++; + + cache->lastUse = dev->srLastUse; + + if (isAWrite) + cache->dirty = 1; + } +} + +/* Invalidate a single cache page. + * Do this when a whole page gets written, + * ie the short cache for this page is no longer valid. + */ +static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId) +{ + if (object->myDev->param.nShortOpCaches > 0) { + yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId); + + if (cache) + cache->object = NULL; + } +} + +/* Invalidate all the cache pages associated with this object + * Do this whenever ther file is deleted or resized. + */ +static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in) +{ + int i; + yaffs_Device *dev = in->myDev; + + if (dev->param.nShortOpCaches > 0) { + /* Invalidate it. */ + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].object == in) + dev->srCache[i].object = NULL; + } + } +} + +/*--------------------- Checkpointing --------------------*/ + + +static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head) +{ + yaffs_CheckpointValidity cp; + + memset(&cp, 0, sizeof(cp)); + + cp.structType = sizeof(cp); + cp.magic = YAFFS_MAGIC; + cp.version = YAFFS_CHECKPOINT_VERSION; + cp.head = (head) ? 1 : 0; + + return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ? + 1 : 0; +} + +static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head) +{ + yaffs_CheckpointValidity cp; + int ok; + + ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp)); + + if (ok) + ok = (cp.structType == sizeof(cp)) && + (cp.magic == YAFFS_MAGIC) && + (cp.version == YAFFS_CHECKPOINT_VERSION) && + (cp.head == ((head) ? 1 : 0)); + return ok ? 
1 : 0; +} + +static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp, + yaffs_Device *dev) +{ + cp->nErasedBlocks = dev->nErasedBlocks; + cp->allocationBlock = dev->allocationBlock; + cp->allocationPage = dev->allocationPage; + cp->nFreeChunks = dev->nFreeChunks; + + cp->nDeletedFiles = dev->nDeletedFiles; + cp->nUnlinkedFiles = dev->nUnlinkedFiles; + cp->nBackgroundDeletions = dev->nBackgroundDeletions; + cp->sequenceNumber = dev->sequenceNumber; + +} + +static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev, + yaffs_CheckpointDevice *cp) +{ + dev->nErasedBlocks = cp->nErasedBlocks; + dev->allocationBlock = cp->allocationBlock; + dev->allocationPage = cp->allocationPage; + dev->nFreeChunks = cp->nFreeChunks; + + dev->nDeletedFiles = cp->nDeletedFiles; + dev->nUnlinkedFiles = cp->nUnlinkedFiles; + dev->nBackgroundDeletions = cp->nBackgroundDeletions; + dev->sequenceNumber = cp->sequenceNumber; +} + + +static int yaffs_WriteCheckpointDevice(yaffs_Device *dev) +{ + yaffs_CheckpointDevice cp; + __u32 nBytes; + __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1); + + int ok; + + /* Write device runtime values*/ + yaffs_DeviceToCheckpointDevice(&cp, dev); + cp.structType = sizeof(cp); + + ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)); + + /* Write block info */ + if (ok) { + nBytes = nBlocks * sizeof(yaffs_BlockInfo); + ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes); + } + + /* Write chunk bits */ + if (ok) { + nBytes = nBlocks * dev->chunkBitmapStride; + ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes); + } + return ok ? 1 : 0; + +} + +static int yaffs_ReadCheckpointDevice(yaffs_Device *dev) +{ + yaffs_CheckpointDevice cp; + __u32 nBytes; + __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1); + + int ok; + + ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp)); + if (!ok) + return 0; + + if (cp.structType != sizeof(cp)) + return 0; + + + yaffs_CheckpointDeviceToDevice(dev, &cp); + + nBytes = nBlocks * sizeof(yaffs_BlockInfo); + + ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes); + + if (!ok) + return 0; + nBytes = nBlocks * dev->chunkBitmapStride; + + ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes); + + return ok ? 1 : 0; +} + +static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp, + yaffs_Object *obj) +{ + + cp->objectId = obj->objectId; + cp->parentId = (obj->parent) ? 
obj->parent->objectId : 0; + cp->hdrChunk = obj->hdrChunk; + cp->variantType = obj->variantType; + cp->deleted = obj->deleted; + cp->softDeleted = obj->softDeleted; + cp->unlinked = obj->unlinked; + cp->fake = obj->fake; + cp->renameAllowed = obj->renameAllowed; + cp->unlinkAllowed = obj->unlinkAllowed; + cp->serial = obj->serial; + cp->nDataChunks = obj->nDataChunks; + + if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) + cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize; + else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) + cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId; +} + +static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp) +{ + + yaffs_Object *parent; + + if (obj->variantType != cp->variantType) { + T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d " + TCONT("chunk %d does not match existing object type %d") + TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk, + obj->variantType)); + return 0; + } + + obj->objectId = cp->objectId; + + if (cp->parentId) + parent = yaffs_FindOrCreateObjectByNumber( + obj->myDev, + cp->parentId, + YAFFS_OBJECT_TYPE_DIRECTORY); + else + parent = NULL; + + if (parent) { + if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d" + TCONT(" chunk %d Parent type, %d, not directory") + TENDSTR), + cp->objectId, cp->parentId, cp->variantType, + cp->hdrChunk, parent->variantType)); + return 0; + } + yaffs_AddObjectToDirectory(parent, obj); + } + + obj->hdrChunk = cp->hdrChunk; + obj->variantType = cp->variantType; + obj->deleted = cp->deleted; + obj->softDeleted = cp->softDeleted; + obj->unlinked = cp->unlinked; + obj->fake = cp->fake; + obj->renameAllowed = cp->renameAllowed; + obj->unlinkAllowed = cp->unlinkAllowed; + obj->serial = cp->serial; + obj->nDataChunks = cp->nDataChunks; + + if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) + obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId; + else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) + obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId; + + if (obj->hdrChunk > 0) + obj->lazyLoaded = 1; + return 1; +} + + + +static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn, + __u32 level, int chunkOffset) +{ + int i; + yaffs_Device *dev = in->myDev; + int ok = 1; + int tnodeSize = yaffs_CalcTnodeSize(dev); + + if (tn) { + if (level > 0) { + + for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) { + if (tn->internal[i]) { + ok = yaffs_CheckpointTnodeWorker(in, + tn->internal[i], + level - 1, + (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i); + } + } + } else if (level == 0) { + __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS; + ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset)); + if (ok) + ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize); + } + } + + return ok; + +} + +static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj) +{ + __u32 endMarker = ~0; + int ok = 1; + + if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) { + ok = yaffs_CheckpointTnodeWorker(obj, + obj->variant.fileVariant.top, + obj->variant.fileVariant.topLevel, + 0); + if (ok) + ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) == + sizeof(endMarker)); + } + + return ok ? 
1 : 0; +} + +static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj) +{ + __u32 baseChunk; + int ok = 1; + yaffs_Device *dev = obj->myDev; + yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant; + yaffs_Tnode *tn; + int nread = 0; + int tnodeSize = yaffs_CalcTnodeSize(dev); + + ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk)); + + while (ok && (~baseChunk)) { + nread++; + /* Read level 0 tnode */ + + + tn = yaffs_GetTnodeRaw(dev); + if (tn) + ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize); + else + ok = 0; + + if (tn && ok) + ok = yaffs_AddOrFindLevel0Tnode(dev, + fileStructPtr, + baseChunk, + tn) ? 1 : 0; + + if (ok) + ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk)); + + } + + T(YAFFS_TRACE_CHECKPOINT, ( + TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR), + nread, baseChunk, ok)); + + return ok ? 1 : 0; +} + + +static int yaffs_WriteCheckpointObjects(yaffs_Device *dev) +{ + yaffs_Object *obj; + yaffs_CheckpointObject cp; + int i; + int ok = 1; + struct ylist_head *lh; + + + /* Iterate through the objects in each hash entry, + * dumping them to the checkpointing stream. + */ + + for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) { + ylist_for_each(lh, &dev->objectBucket[i].list) { + if (lh) { + obj = ylist_entry(lh, yaffs_Object, hashLink); + if (!obj->deferedFree) { + yaffs_ObjectToCheckpointObject(&cp, obj); + cp.structType = sizeof(cp); + + T(YAFFS_TRACE_CHECKPOINT, ( + TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %p" TENDSTR), + cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, obj)); + + ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)); + + if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE) + ok = yaffs_WriteCheckpointTnodes(obj); + } + } + } + } + + /* Dump end of list */ + memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject)); + cp.structType = sizeof(cp); + + if (ok) + ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)); + + return ok ? 1 : 0; +} + +static int yaffs_ReadCheckpointObjects(yaffs_Device *dev) +{ + yaffs_Object *obj; + yaffs_CheckpointObject cp; + int ok = 1; + int done = 0; + yaffs_Object *hardList = NULL; + + while (ok && !done) { + ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp)); + if (cp.structType != sizeof(cp)) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR), + cp.structType, (int)sizeof(cp), ok)); + ok = 0; + } + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR), + cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk)); + + if (ok && cp.objectId == ~0) + done = 1; + else if (ok) { + obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType); + if (obj) { + ok = yaffs_CheckpointObjectToObject(obj, &cp); + if (!ok) + break; + if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) { + ok = yaffs_ReadCheckpointTnodes(obj); + } else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) { + obj->hardLinks.next = + (struct ylist_head *) hardList; + hardList = obj; + } + } else + ok = 0; + } + } + + if (ok) + yaffs_HardlinkFixup(dev, hardList); + + return ok ? 
1 : 0; +} + +static int yaffs_WriteCheckpointSum(yaffs_Device *dev) +{ + __u32 checkpointSum; + int ok; + + yaffs_GetCheckpointSum(dev, &checkpointSum); + + ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum)); + + if (!ok) + return 0; + + return 1; +} + +static int yaffs_ReadCheckpointSum(yaffs_Device *dev) +{ + __u32 checkpointSum0; + __u32 checkpointSum1; + int ok; + + yaffs_GetCheckpointSum(dev, &checkpointSum0); + + ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1)); + + if (!ok) + return 0; + + if (checkpointSum0 != checkpointSum1) + return 0; + + return 1; +} + + +static int yaffs_WriteCheckpointData(yaffs_Device *dev) +{ + int ok = 1; + + if (!yaffs_CheckpointRequired(dev)) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR))); + ok = 0; + } + + if (ok) + ok = yaffs_CheckpointOpen(dev, 1); + + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR))); + ok = yaffs_WriteCheckpointValidityMarker(dev, 1); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR))); + ok = yaffs_WriteCheckpointDevice(dev); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR))); + ok = yaffs_WriteCheckpointObjects(dev); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR))); + ok = yaffs_WriteCheckpointValidityMarker(dev, 0); + } + + if (ok) + ok = yaffs_WriteCheckpointSum(dev); + + if (!yaffs_CheckpointClose(dev)) + ok = 0; + + if (ok) + dev->isCheckpointed = 1; + else + dev->isCheckpointed = 0; + + return dev->isCheckpointed; +} + +static int yaffs_ReadCheckpointData(yaffs_Device *dev) +{ + int ok = 1; + + if (dev->param.skipCheckpointRead || !dev->param.isYaffs2) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR))); + ok = 0; + } + + if (ok) + ok = yaffs_CheckpointOpen(dev, 0); /* open for read */ + + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR))); + ok = yaffs_ReadCheckpointValidityMarker(dev, 1); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR))); + ok = yaffs_ReadCheckpointDevice(dev); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR))); + ok = yaffs_ReadCheckpointObjects(dev); + } + if (ok) { + T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR))); + ok = yaffs_ReadCheckpointValidityMarker(dev, 0); + } + + if (ok) { + ok = yaffs_ReadCheckpointSum(dev); + T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok)); + } + + if (!yaffs_CheckpointClose(dev)) + ok = 0; + + if (ok) + dev->isCheckpointed = 1; + else + dev->isCheckpointed = 0; + + return ok ? 
1 : 0; + +} + +static void yaffs_InvalidateCheckpoint(yaffs_Device *dev) +{ + if (dev->isCheckpointed || + dev->blocksInCheckpoint > 0) { + dev->isCheckpointed = 0; + yaffs_CheckpointInvalidateStream(dev); + } + if (dev->param.markSuperBlockDirty) + dev->param.markSuperBlockDirty(dev); +} + + +int yaffs_CheckpointSave(yaffs_Device *dev) +{ + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed)); + + yaffs_VerifyObjects(dev); + yaffs_VerifyBlocks(dev); + yaffs_VerifyFreeChunks(dev); + + if (!dev->isCheckpointed) { + yaffs_InvalidateCheckpoint(dev); + yaffs_WriteCheckpointData(dev); + } + + T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed)); + + return dev->isCheckpointed; +} + +int yaffs_CheckpointRestore(yaffs_Device *dev) +{ + int retval; + T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed)); + + retval = yaffs_ReadCheckpointData(dev); + + if (dev->isCheckpointed) { + yaffs_VerifyObjects(dev); + yaffs_VerifyBlocks(dev); + yaffs_VerifyFreeChunks(dev); + } + + T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed)); + + return retval; +} + +/*--------------------- File read/write ------------------------ + * Read and write have very similar structures. + * In general the read/write has three parts to it + * An incomplete chunk to start with (if the read/write is not chunk-aligned) + * Some complete chunks + * An incomplete chunk to end off with + * + * Curve-balls: the first chunk might also be the last chunk. + */ + +int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset, + int nBytes) +{ + + int chunk; + __u32 start; + int nToCopy; + int n = nBytes; + int nDone = 0; + yaffs_ChunkCache *cache; + + yaffs_Device *dev; + + dev = in->myDev; + + while (n > 0) { + /* chunk = offset / dev->nDataBytesPerChunk + 1; */ + /* start = offset % dev->nDataBytesPerChunk; */ + yaffs_AddrToChunk(dev, offset, &chunk, &start); + chunk++; + + /* OK now check for the curveball where the start and end are in + * the same chunk. + */ + if ((start + n) < dev->nDataBytesPerChunk) + nToCopy = n; + else + nToCopy = dev->nDataBytesPerChunk - start; + + cache = yaffs_FindChunkCache(in, chunk); + + /* If the chunk is already in the cache or it is less than a whole chunk + * or we're using inband tags then use the cache (if there is caching) + * else bypass the cache. + */ + if (cache || nToCopy != dev->nDataBytesPerChunk || dev->param.inbandTags) { + if (dev->param.nShortOpCaches > 0) { + + /* If we can't find the data in the cache, then load it up. */ + + if (!cache) { + cache = yaffs_GrabChunkCache(in->myDev); + cache->object = in; + cache->chunkId = chunk; + cache->dirty = 0; + cache->locked = 0; + yaffs_ReadChunkDataFromObject(in, chunk, + cache-> + data); + cache->nBytes = 0; + } + + yaffs_UseChunkCache(dev, cache, 0); + + cache->locked = 1; + + + memcpy(buffer, &cache->data[start], nToCopy); + + cache->locked = 0; + } else { + /* Read into the local buffer then copy..*/ + + __u8 *localBuffer = + yaffs_GetTempBuffer(dev, __LINE__); + yaffs_ReadChunkDataFromObject(in, chunk, + localBuffer); + + memcpy(buffer, &localBuffer[start], nToCopy); + + + yaffs_ReleaseTempBuffer(dev, localBuffer, + __LINE__); + } + + } else { + + /* A full chunk. Read directly into the supplied buffer. 
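+ * The short op cache is bypassed here: this is a whole-chunk read that is
+ * not already cached and inband tags are not in use.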
*/ + yaffs_ReadChunkDataFromObject(in, chunk, buffer); + + } + + n -= nToCopy; + offset += nToCopy; + buffer += nToCopy; + nDone += nToCopy; + + } + + return nDone; +} + +int yaffs_DoWriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset, + int nBytes, int writeThrough) +{ + + int chunk; + __u32 start; + int nToCopy; + int n = nBytes; + int nDone = 0; + int nToWriteBack; + int startOfWrite = offset; + int chunkWritten = 0; + __u32 nBytesRead; + __u32 chunkStart; + + yaffs_Device *dev; + + dev = in->myDev; + + while (n > 0 && chunkWritten >= 0) { + yaffs_AddrToChunk(dev, offset, &chunk, &start); + + if (chunk * dev->nDataBytesPerChunk + start != offset || + start >= dev->nDataBytesPerChunk) { + T(YAFFS_TRACE_ERROR, ( + TSTR("AddrToChunk of offset %d gives chunk %d start %d" + TENDSTR), + (int)offset, chunk, start)); + } + chunk++; /* File pos to chunk in file offset */ + + /* OK now check for the curveball where the start and end are in + * the same chunk. + */ + + if ((start + n) < dev->nDataBytesPerChunk) { + nToCopy = n; + + /* Now folks, to calculate how many bytes to write back.... + * If we're overwriting and not writing to then end of file then + * we need to write back as much as was there before. + */ + + chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk); + + if (chunkStart > in->variant.fileVariant.fileSize) + nBytesRead = 0; /* Past end of file */ + else + nBytesRead = in->variant.fileVariant.fileSize - chunkStart; + + if (nBytesRead > dev->nDataBytesPerChunk) + nBytesRead = dev->nDataBytesPerChunk; + + nToWriteBack = + (nBytesRead > + (start + n)) ? nBytesRead : (start + n); + + if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk) + YBUG(); + + } else { + nToCopy = dev->nDataBytesPerChunk - start; + nToWriteBack = dev->nDataBytesPerChunk; + } + + if (nToCopy != dev->nDataBytesPerChunk || dev->param.inbandTags) { + /* An incomplete start or end chunk (or maybe both start and end chunk), + * or we're using inband tags, so we want to use the cache buffers. + */ + if (dev->param.nShortOpCaches > 0) { + yaffs_ChunkCache *cache; + /* If we can't find the data in the cache, then load the cache */ + cache = yaffs_FindChunkCache(in, chunk); + + if (!cache + && yaffs_CheckSpaceForAllocation(dev, 1)) { + cache = yaffs_GrabChunkCache(dev); + cache->object = in; + cache->chunkId = chunk; + cache->dirty = 0; + cache->locked = 0; + yaffs_ReadChunkDataFromObject(in, chunk, + cache->data); + } else if (cache && + !cache->dirty && + !yaffs_CheckSpaceForAllocation(dev, 1)) { + /* Drop the cache if it was a read cache item and + * no space check has been made for it. + */ + cache = NULL; + } + + if (cache) { + yaffs_UseChunkCache(dev, cache, 1); + cache->locked = 1; + + + memcpy(&cache->data[start], buffer, + nToCopy); + + + cache->locked = 0; + cache->nBytes = nToWriteBack; + + if (writeThrough) { + chunkWritten = + yaffs_WriteChunkDataToObject + (cache->object, + cache->chunkId, + cache->data, cache->nBytes, + 1); + cache->dirty = 0; + } + + } else { + chunkWritten = -1; /* fail the write */ + } + } else { + /* An incomplete start or end chunk (or maybe both start and end chunk) + * Read into the local buffer then copy, then copy over and write back. 
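+ * i.e. a read-modify-write: the existing chunk is read into a temp buffer,
+ * the new bytes are merged in at 'start', and nToWriteBack bytes are
+ * written back out.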
+ */ + + __u8 *localBuffer = + yaffs_GetTempBuffer(dev, __LINE__); + + yaffs_ReadChunkDataFromObject(in, chunk, + localBuffer); + + + + memcpy(&localBuffer[start], buffer, nToCopy); + + chunkWritten = + yaffs_WriteChunkDataToObject(in, chunk, + localBuffer, + nToWriteBack, + 0); + + yaffs_ReleaseTempBuffer(dev, localBuffer, + __LINE__); + + } + + } else { + /* A full chunk. Write directly from the supplied buffer. */ + + + + chunkWritten = + yaffs_WriteChunkDataToObject(in, chunk, buffer, + dev->nDataBytesPerChunk, + 0); + + /* Since we've overwritten the cached data, we better invalidate it. */ + yaffs_InvalidateChunkCache(in, chunk); + } + + if (chunkWritten >= 0) { + n -= nToCopy; + offset += nToCopy; + buffer += nToCopy; + nDone += nToCopy; + } + + } + + /* Update file object */ + + if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize) + in->variant.fileVariant.fileSize = (startOfWrite + nDone); + + in->dirty = 1; + + return nDone; +} + +int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset, + int nBytes, int writeThrough) +{ + yaffs_HandleHole(in,offset); + return yaffs_DoWriteDataToFile(in,buffer,offset,nBytes,writeThrough); +} + + + +/* ---------------------- File resizing stuff ------------------ */ + +static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize) +{ + + yaffs_Device *dev = in->myDev; + int oldFileSize = in->variant.fileVariant.fileSize; + + int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk; + + int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) / + dev->nDataBytesPerChunk; + int i; + int chunkId; + + /* Delete backwards so that we don't end up with holes if + * power is lost part-way through the operation. + */ + for (i = lastDel; i >= startDel; i--) { + /* NB this could be optimised somewhat, + * eg. could retrieve the tags and write them without + * using yaffs_DeleteChunk + */ + + chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL); + if (chunkId > 0) { + if (chunkId < + (dev->internalStartBlock * dev->param.nChunksPerBlock) + || chunkId >= + ((dev->internalEndBlock + + 1) * dev->param.nChunksPerBlock)) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("Found daft chunkId %d for %d" TENDSTR), + chunkId, i)); + } else { + in->nDataChunks--; + yaffs_DeleteChunk(dev, chunkId, 1, __LINE__); + } + } + } + +} + + +static void yaffs_ResizeDown( yaffs_Object *obj, loff_t newSize) +{ + int newFullChunks; + __u32 newSizeOfPartialChunk; + yaffs_Device *dev = obj->myDev; + + yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk); + + yaffs_PruneResizedChunks(obj, newSize); + + if (newSizeOfPartialChunk != 0) { + int lastChunk = 1 + newFullChunks; + __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__); + + /* Got to read and rewrite the last chunk with its new size and zero pad */ + yaffs_ReadChunkDataFromObject(obj, lastChunk, localBuffer); + memset(localBuffer + newSizeOfPartialChunk, 0, + dev->nDataBytesPerChunk - newSizeOfPartialChunk); + + yaffs_WriteChunkDataToObject(obj, lastChunk, localBuffer, + newSizeOfPartialChunk, 1); + + yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__); + } + + obj->variant.fileVariant.fileSize = newSize; + + yaffs_PruneFileStructure(dev, &obj->variant.fileVariant); +} + + +static int yaffs_HandleHole(yaffs_Object *obj, loff_t newSize) +{ + /* if newsSize > oldFileSize. + * We're going to be writing a hole. + * If the hole is small then write zeros otherwise write a start of hole marker. 
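+ * A small hole (less than YAFFS_SMALL_HOLE_THRESHOLD chunks, and only if
+ * that much space can be reserved) is filled with zeroed chunks; a large
+ * hole is recorded by writing a shrink header that still carries the old
+ * file size.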
+ */ + + + loff_t oldFileSize; + int increase; + int smallHole ; + int result = YAFFS_OK; + yaffs_Device *dev = NULL; + + __u8 *localBuffer = NULL; + + int smallIncreaseOk = 0; + + if(!obj) + return YAFFS_FAIL; + + if(obj->variantType != YAFFS_OBJECT_TYPE_FILE) + return YAFFS_FAIL; + + dev = obj->myDev; + + /* Bail out if not yaffs2 mode */ + if(!dev->param.isYaffs2) + return YAFFS_OK; + + oldFileSize = obj->variant.fileVariant.fileSize; + + if (newSize <= oldFileSize) + return YAFFS_OK; + + increase = newSize - oldFileSize; + + if(increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->nDataBytesPerChunk && + yaffs_CheckSpaceForAllocation(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1)) + smallHole = 1; + else + smallHole = 0; + + if(smallHole) + localBuffer= yaffs_GetTempBuffer(dev, __LINE__); + + if(localBuffer){ + /* fill hole with zero bytes */ + int pos = oldFileSize; + int thisWrite; + int written; + memset(localBuffer,0,dev->nDataBytesPerChunk); + smallIncreaseOk = 1; + + while(increase > 0 && smallIncreaseOk){ + thisWrite = increase; + if(thisWrite > dev->nDataBytesPerChunk) + thisWrite = dev->nDataBytesPerChunk; + written = yaffs_DoWriteDataToFile(obj,localBuffer,pos,thisWrite,0); + if(written == thisWrite){ + pos += thisWrite; + increase -= thisWrite; + } else + smallIncreaseOk = 0; + } + + yaffs_ReleaseTempBuffer(dev,localBuffer,__LINE__); + + /* If we were out of space then reverse any chunks we've added */ + if(!smallIncreaseOk) + yaffs_ResizeDown(obj, oldFileSize); + } + + if (!smallIncreaseOk && + obj->parent && + obj->parent->objectId != YAFFS_OBJECTID_UNLINKED && + obj->parent->objectId != YAFFS_OBJECTID_DELETED){ + /* Write a hole start header with the old file size */ + yaffs_UpdateObjectHeader(obj, NULL, 0,1,0); + } + + return result; + +} + +int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize) +{ + yaffs_Device *dev = in->myDev; + int oldFileSize = in->variant.fileVariant.fileSize; + + yaffs_FlushFilesChunkCache(in); + yaffs_InvalidateWholeChunkCache(in); + + yaffs_CheckGarbageCollection(dev,0); + + if (in->variantType != YAFFS_OBJECT_TYPE_FILE) + return YAFFS_FAIL; + + if (newSize == oldFileSize) + return YAFFS_OK; + + if(newSize > oldFileSize){ + yaffs_HandleHole(in,newSize); + in->variant.fileVariant.fileSize = newSize; + } else { + /* newSize < oldFileSize */ + yaffs_ResizeDown(in, newSize); + } + + /* Write a new object header to reflect the resize. + * show we've shrunk the file, if need be + * Do this only if the file is not in the deleted directories + * and is not shadowed. 
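+ * (in those cases the object is either about to be deleted or is being
+ * superseded, so no header is written for it)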
+ */ + if (in->parent && + !in->isShadowed && + in->parent->objectId != YAFFS_OBJECTID_UNLINKED && + in->parent->objectId != YAFFS_OBJECTID_DELETED) + yaffs_UpdateObjectHeader(in, NULL, 0,0,0); + + + return YAFFS_OK; +} + +loff_t yaffs_GetFileSize(yaffs_Object *obj) +{ + YCHAR *alias = NULL; + obj = yaffs_GetEquivalentObject(obj); + + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + return obj->variant.fileVariant.fileSize; + case YAFFS_OBJECT_TYPE_SYMLINK: + alias = obj->variant.symLinkVariant.alias; + if(!alias) + return 0; + return yaffs_strnlen(alias,YAFFS_MAX_ALIAS_LENGTH); + default: + return 0; + } +} + + + +int yaffs_FlushFile(yaffs_Object *in, int updateTime, int dataSync) +{ + int retVal; + if (in->dirty) { + yaffs_FlushFilesChunkCache(in); + if(dataSync) /* Only sync data */ + retVal=YAFFS_OK; + else { + if (updateTime) { +#ifdef CONFIG_YAFFS_WINCE + yfsd_WinFileTimeNow(in->win_mtime); +#else + + in->yst_mtime = Y_CURRENT_TIME; + +#endif + } + + retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >= + 0) ? YAFFS_OK : YAFFS_FAIL; + } + } else { + retVal = YAFFS_OK; + } + + return retVal; + +} + +static int yaffs_DoGenericObjectDeletion(yaffs_Object *in) +{ + + /* First off, invalidate the file's data in the cache, without flushing. */ + yaffs_InvalidateWholeChunkCache(in); + + if (in->myDev->param.isYaffs2 && (in->parent != in->myDev->deletedDir)) { + /* Move to the unlinked directory so we have a record that it was deleted. */ + yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0); + + } + + yaffs_RemoveObjectFromDirectory(in); + yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__); + in->hdrChunk = 0; + + yaffs_FreeObject(in); + return YAFFS_OK; + +} + +/* yaffs_DeleteFile deletes the whole file data + * and the inode associated with the file. + * It does not delete the links associated with the file. + */ +static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in) +{ + + int retVal; + int immediateDeletion = 0; + yaffs_Device *dev = in->myDev; + + if (!in->myInode) + immediateDeletion = 1; + + if (immediateDeletion) { + retVal = + yaffs_ChangeObjectName(in, in->myDev->deletedDir, + _Y("deleted"), 0, 0); + T(YAFFS_TRACE_TRACING, + (TSTR("yaffs: immediate deletion of file %d" TENDSTR), + in->objectId)); + in->deleted = 1; + in->myDev->nDeletedFiles++; + if (dev->param.disableSoftDelete || dev->param.isYaffs2) + yaffs_ResizeFile(in, 0); + yaffs_SoftDeleteFile(in); + } else { + retVal = + yaffs_ChangeObjectName(in, in->myDev->unlinkedDir, + _Y("unlinked"), 0, 0); + } + + + return retVal; +} + +int yaffs_DeleteFile(yaffs_Object *in) +{ + int retVal = YAFFS_OK; + int deleted; /* Need to cache value on stack if in is freed */ + yaffs_Device *dev = in->myDev; + + if (dev->param.disableSoftDelete || dev->param.isYaffs2) + yaffs_ResizeFile(in, 0); + + if (in->nDataChunks > 0) { + /* Use soft deletion if there is data in the file. + * That won't be the case if it has been resized to zero. + */ + if (!in->unlinked) + retVal = yaffs_UnlinkFileIfNeeded(in); + + deleted = in->deleted; + + if (retVal == YAFFS_OK && in->unlinked && !in->deleted) { + in->deleted = 1; + deleted = 1; + in->myDev->nDeletedFiles++; + yaffs_SoftDeleteFile(in); + } + return deleted ? 
YAFFS_OK : YAFFS_FAIL; + } else { + /* The file has no data chunks so we toss it immediately */ + yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top); + in->variant.fileVariant.top = NULL; + yaffs_DoGenericObjectDeletion(in); + + return YAFFS_OK; + } +} + +static int yaffs_IsNonEmptyDirectory(yaffs_Object *obj) +{ + return (obj->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) && + !(ylist_empty(&obj->variant.directoryVariant.children)); +} + +static int yaffs_DeleteDirectory(yaffs_Object *obj) +{ + /* First check that the directory is empty. */ + if (yaffs_IsNonEmptyDirectory(obj)) + return YAFFS_FAIL; + + return yaffs_DoGenericObjectDeletion(obj); +} + +static int yaffs_DeleteSymLink(yaffs_Object *in) +{ + if(in->variant.symLinkVariant.alias) + YFREE(in->variant.symLinkVariant.alias); + in->variant.symLinkVariant.alias=NULL; + + return yaffs_DoGenericObjectDeletion(in); +} + +static int yaffs_DeleteHardLink(yaffs_Object *in) +{ + /* remove this hardlink from the list assocaited with the equivalent + * object + */ + ylist_del_init(&in->hardLinks); + return yaffs_DoGenericObjectDeletion(in); +} + +int yaffs_DeleteObject(yaffs_Object *obj) +{ +int retVal = -1; + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + retVal = yaffs_DeleteFile(obj); + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + if(!ylist_empty(&obj->variant.directoryVariant.dirty)){ + T(YAFFS_TRACE_BACKGROUND, (TSTR("Remove object %d from dirty directories" TENDSTR),obj->objectId)); + ylist_del_init(&obj->variant.directoryVariant.dirty); + } + return yaffs_DeleteDirectory(obj); + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + retVal = yaffs_DeleteSymLink(obj); + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + retVal = yaffs_DeleteHardLink(obj); + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + retVal = yaffs_DoGenericObjectDeletion(obj); + break; + case YAFFS_OBJECT_TYPE_UNKNOWN: + retVal = 0; + break; /* should not happen. */ + } + + return retVal; +} + +static int yaffs_UnlinkWorker(yaffs_Object *obj) +{ + + int immediateDeletion = 0; + + if (!obj->myInode) + immediateDeletion = 1; + + if(obj) + yaffs_UpdateParent(obj->parent); + + if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) { + return yaffs_DeleteHardLink(obj); + } else if (!ylist_empty(&obj->hardLinks)) { + /* Curve ball: We're unlinking an object that has a hardlink. + * + * This problem arises because we are not strictly following + * The Linux link/inode model. + * + * We can't really delete the object. + * Instead, we do the following: + * - Select a hardlink. + * - Unhook it from the hard links + * - Move it from its parent directory (so that the rename can work) + * - Rename the object to the hardlink's name. 
+ * - Delete the hardlink + */ + + yaffs_Object *hl; + yaffs_Object *parent; + int retVal; + YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; + + hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks); + + yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1); + parent = hl->parent; + + ylist_del_init(&hl->hardLinks); + + yaffs_AddObjectToDirectory(obj->myDev->unlinkedDir, hl); + + retVal = yaffs_ChangeObjectName(obj,parent, name, 0, 0); + + if (retVal == YAFFS_OK) + retVal = yaffs_DoGenericObjectDeletion(hl); + + return retVal; + + } else if (immediateDeletion) { + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + return yaffs_DeleteFile(obj); + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + ylist_del_init(&obj->variant.directoryVariant.dirty); + return yaffs_DeleteDirectory(obj); + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + return yaffs_DeleteSymLink(obj); + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + return yaffs_DoGenericObjectDeletion(obj); + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + case YAFFS_OBJECT_TYPE_UNKNOWN: + default: + return YAFFS_FAIL; + } + } else if(yaffs_IsNonEmptyDirectory(obj)) + return YAFFS_FAIL; + else + return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir, + _Y("unlinked"), 0, 0); +} + + +static int yaffs_UnlinkObject(yaffs_Object *obj) +{ + + if (obj && obj->unlinkAllowed) + return yaffs_UnlinkWorker(obj); + + return YAFFS_FAIL; + +} +int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name) +{ + yaffs_Object *obj; + + obj = yaffs_FindObjectByName(dir, name); + return yaffs_UnlinkObject(obj); +} + +/*----------------------- Initialisation Scanning ---------------------- */ + +static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId, + int backwardScanning) +{ + yaffs_Object *obj; + + if (!backwardScanning) { + /* Handle YAFFS1 forward scanning case + * For YAFFS1 we always do the deletion + */ + + } else { + /* Handle YAFFS2 case (backward scanning) + * If the shadowed object exists then ignore. + */ + obj = yaffs_FindObjectByNumber(dev, objId); + if(obj) + return; + } + + /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc. + * We put it in unlinked dir to be cleaned up after the scanning + */ + obj = + yaffs_FindOrCreateObjectByNumber(dev, objId, + YAFFS_OBJECT_TYPE_FILE); + if (!obj) + return; + obj->isShadowed = 1; + yaffs_AddObjectToDirectory(dev->unlinkedDir, obj); + obj->variant.fileVariant.shrinkSize = 0; + obj->valid = 1; /* So that we don't read any other info for this file */ + +} + +typedef struct { + int seq; + int block; +} yaffs_BlockIndex; + + +static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList) +{ + yaffs_Object *hl; + yaffs_Object *in; + + while (hardList) { + hl = hardList; + hardList = (yaffs_Object *) (hardList->hardLinks.next); + + in = yaffs_FindObjectByNumber(dev, + hl->variant.hardLinkVariant. + equivalentObjectId); + + if (in) { + /* Add the hardlink pointers */ + hl->variant.hardLinkVariant.equivalentObject = in; + ylist_add(&hl->hardLinks, &in->hardLinks); + } else { + /* Todo Need to report/handle this better. + * Got a problem... 
hardlink to a non-existant object + */ + hl->variant.hardLinkVariant.equivalentObject = NULL; + YINIT_LIST_HEAD(&hl->hardLinks); + + } + } +} + + + + + +static int ybicmp(const void *a, const void *b) +{ + register int aseq = ((yaffs_BlockIndex *)a)->seq; + register int bseq = ((yaffs_BlockIndex *)b)->seq; + register int ablock = ((yaffs_BlockIndex *)a)->block; + register int bblock = ((yaffs_BlockIndex *)b)->block; + if (aseq == bseq) + return ablock - bblock; + else + return aseq - bseq; +} + + +struct yaffs_ShadowFixerStruct { + int objectId; + int shadowedId; + struct yaffs_ShadowFixerStruct *next; +}; + + +static void yaffs_StripDeletedObjects(yaffs_Device *dev) +{ + /* + * Sort out state of unlinked and deleted objects after scanning. + */ + struct ylist_head *i; + struct ylist_head *n; + yaffs_Object *l; + + /* Soft delete all the unlinked files */ + ylist_for_each_safe(i, n, + &dev->unlinkedDir->variant.directoryVariant.children) { + if (i) { + l = ylist_entry(i, yaffs_Object, siblings); + yaffs_DeleteObject(l); + } + } + + ylist_for_each_safe(i, n, + &dev->deletedDir->variant.directoryVariant.children) { + if (i) { + l = ylist_entry(i, yaffs_Object, siblings); + yaffs_DeleteObject(l); + } + } + +} + +/* + * This code iterates through all the objects making sure that they are rooted. + * Any unrooted objects are re-rooted in lost+found. + * An object needs to be in one of: + * - Directly under deleted, unlinked + * - Directly or indirectly under root. + * + * Note: + * This code assumes that we don't ever change the current relationships between + * directories: + * rootDir->parent == unlinkedDir->parent == deletedDir->parent == NULL + * lostNfound->parent == rootDir + * + * This fixes the problem where directories might have inadvertently been deleted + * leaving the object "hanging" without being rooted in the directory tree. + */ + +static int yaffs_HasNULLParent(yaffs_Device *dev, yaffs_Object *obj) +{ + return (obj == dev->deletedDir || + obj == dev->unlinkedDir|| + obj == dev->rootDir); +} + +static void yaffs_FixHangingObjects(yaffs_Device *dev) +{ + yaffs_Object *obj; + yaffs_Object *parent; + int i; + struct ylist_head *lh; + struct ylist_head *n; + int depthLimit; + int hanging; + + + /* Iterate through the objects in each hash entry, + * looking at each object. + * Make sure it is rooted. + */ + + for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { + ylist_for_each_safe(lh, n, &dev->objectBucket[i].list) { + if (lh) { + obj = ylist_entry(lh, yaffs_Object, hashLink); + parent= obj->parent; + + if(yaffs_HasNULLParent(dev,obj)){ + /* These directories are not hanging */ + hanging = 0; + } + else if(!parent || parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) + hanging = 1; + else if(yaffs_HasNULLParent(dev,parent)) + hanging = 0; + else { + /* + * Need to follow the parent chain to see if it is hanging. + */ + hanging = 0; + depthLimit=100; + + while(parent != dev->rootDir && + parent->parent && + parent->parent->variantType == YAFFS_OBJECT_TYPE_DIRECTORY && + depthLimit > 0){ + parent = parent->parent; + depthLimit--; + } + if(parent != dev->rootDir) + hanging = 1; + } + if(hanging){ + T(YAFFS_TRACE_SCAN, + (TSTR("Hanging object %d moved to lost and found" TENDSTR), + obj->objectId)); + yaffs_AddObjectToDirectory(dev->lostNFoundDir,obj); + } + } + } + } +} + + +/* + * Delete directory contents for cleaning up lost and found. 
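ybicmp() above orders scanned blocks by sequence number, breaking ties with the block number; yaffs_ScanBackwards() later sorts its block index with it so blocks are replayed in allocation order. A minimal standalone sketch of the same ordering, assuming nothing beyond the C library (a local struct stands in for yaffs_BlockIndex and qsort() for yaffs_qsort()):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for yaffs_BlockIndex: one entry per block that needs scanning. */
	struct block_index {
		int seq;	/* sequence number stamped when the block was allocated */
		int block;	/* physical block number, used only to break ties */
	};

	/* Same ordering as ybicmp(): ascending sequence number, then block number. */
	static int block_index_cmp(const void *a, const void *b)
	{
		const struct block_index *x = a;
		const struct block_index *y = b;

		if (x->seq != y->seq)
			return x->seq - y->seq;
		return x->block - y->block;
	}

	int main(void)
	{
		struct block_index idx[] = {
			{ 0x1005, 7 }, { 0x1002, 3 }, { 0x1002, 1 }, { 0x1009, 2 },
		};
		size_t n = sizeof(idx) / sizeof(idx[0]);

		qsort(idx, n, sizeof(idx[0]), block_index_cmp);

		for (size_t i = 0; i < n; i++)
			printf("seq %#x block %d\n", idx[i].seq, idx[i].block);
		return 0;
	}

Sorting by sequence number first is what lets the backward scan assume that data in later blocks supersedes data in earlier ones.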
+ */ +static void yaffs_DeleteDirectoryContents(yaffs_Object *dir) +{ + yaffs_Object *obj; + struct ylist_head *lh; + struct ylist_head *n; + + if(dir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) + YBUG(); + + ylist_for_each_safe(lh, n, &dir->variant.directoryVariant.children) { + if (lh) { + obj = ylist_entry(lh, yaffs_Object, siblings); + if(obj->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) + yaffs_DeleteDirectoryContents(obj); + + T(YAFFS_TRACE_SCAN, + (TSTR("Deleting lost_found object %d" TENDSTR), + obj->objectId)); + + /* Need to use UnlinkObject since Delete would not handle + * hardlinked objects correctly. + */ + yaffs_UnlinkObject(obj); + } + } + +} + +static void yaffs_EmptyLostAndFound(yaffs_Device *dev) +{ + yaffs_DeleteDirectoryContents(dev->lostNFoundDir); +} + +static int yaffs_Scan(yaffs_Device *dev) +{ + yaffs_ExtendedTags tags; + int blk; + int blockIterator; + int startIterator; + int endIterator; + int result; + + int chunk; + int c; + int deleted; + yaffs_BlockState state; + yaffs_Object *hardList = NULL; + yaffs_BlockInfo *bi; + __u32 sequenceNumber; + yaffs_ObjectHeader *oh; + yaffs_Object *in; + yaffs_Object *parent; + + int alloc_failed = 0; + + struct yaffs_ShadowFixerStruct *shadowFixerList = NULL; + + + __u8 *chunkData; + + + + T(YAFFS_TRACE_SCAN, + (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR), + dev->internalStartBlock, dev->internalEndBlock)); + + chunkData = yaffs_GetTempBuffer(dev, __LINE__); + + dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER; + + /* Scan all the blocks to determine their state */ + bi = dev->blockInfo; + for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) { + yaffs_ClearChunkBits(dev, blk); + bi->pagesInUse = 0; + bi->softDeletions = 0; + + yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber); + + bi->blockState = state; + bi->sequenceNumber = sequenceNumber; + + if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK) + bi->blockState = state = YAFFS_BLOCK_STATE_DEAD; + + T(YAFFS_TRACE_SCAN_DEBUG, + (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk, + state, sequenceNumber)); + + if (state == YAFFS_BLOCK_STATE_DEAD) { + T(YAFFS_TRACE_BAD_BLOCKS, + (TSTR("block %d is bad" TENDSTR), blk)); + } else if (state == YAFFS_BLOCK_STATE_EMPTY) { + T(YAFFS_TRACE_SCAN_DEBUG, + (TSTR("Block empty " TENDSTR))); + dev->nErasedBlocks++; + dev->nFreeChunks += dev->param.nChunksPerBlock; + } + bi++; + } + + startIterator = dev->internalStartBlock; + endIterator = dev->internalEndBlock; + + /* For each block.... */ + for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator; + blockIterator++) { + + YYIELD(); + + YYIELD(); + + blk = blockIterator; + + bi = yaffs_GetBlockInfo(dev, blk); + state = bi->blockState; + + deleted = 0; + + /* For each chunk in each block that needs scanning....*/ + for (c = 0; !alloc_failed && c < dev->param.nChunksPerBlock && + state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) { + /* Read the tags and decide what to do */ + chunk = blk * dev->param.nChunksPerBlock + c; + + result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL, + &tags); + + /* Let's have a good look at this chunk... */ + + if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) { + /* YAFFS1 only... 
+ * A deleted chunk + */ + deleted++; + dev->nFreeChunks++; + /*T((" %d %d deleted\n",blk,c)); */ + } else if (!tags.chunkUsed) { + /* An unassigned chunk in the block + * This means that either the block is empty or + * this is the one being allocated from + */ + + if (c == 0) { + /* We're looking at the first chunk in the block so the block is unused */ + state = YAFFS_BLOCK_STATE_EMPTY; + dev->nErasedBlocks++; + } else { + /* this is the block being allocated from */ + T(YAFFS_TRACE_SCAN, + (TSTR + (" Allocating from %d %d" TENDSTR), + blk, c)); + state = YAFFS_BLOCK_STATE_ALLOCATING; + dev->allocationBlock = blk; + dev->allocationPage = c; + dev->allocationBlockFinder = blk; + /* Set block finder here to encourage the allocator to go forth from here. */ + + } + + dev->nFreeChunks += (dev->param.nChunksPerBlock - c); + } else if (tags.chunkId > 0) { + /* chunkId > 0 so it is a data chunk... */ + unsigned int endpos; + + yaffs_SetChunkBit(dev, blk, c); + bi->pagesInUse++; + + in = yaffs_FindOrCreateObjectByNumber(dev, + tags. + objectId, + YAFFS_OBJECT_TYPE_FILE); + /* PutChunkIntoFile checks for a clash (two data chunks with + * the same chunkId). + */ + + if (!in) + alloc_failed = 1; + + if (in) { + if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1)) + alloc_failed = 1; + } + + endpos = + (tags.chunkId - 1) * dev->nDataBytesPerChunk + + tags.byteCount; + if (in && + in->variantType == YAFFS_OBJECT_TYPE_FILE + && in->variant.fileVariant.scannedFileSize < + endpos) { + in->variant.fileVariant. + scannedFileSize = endpos; + if (!dev->param.useHeaderFileSize) { + in->variant.fileVariant. + fileSize = + in->variant.fileVariant. + scannedFileSize; + } + + } + /* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId)); */ + } else { + /* chunkId == 0, so it is an ObjectHeader. + * Thus, we read in the object header and make the object + */ + yaffs_SetChunkBit(dev, blk, c); + bi->pagesInUse++; + + result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, + chunkData, + NULL); + + oh = (yaffs_ObjectHeader *) chunkData; + + in = yaffs_FindObjectByNumber(dev, + tags.objectId); + if (in && in->variantType != oh->type) { + /* This should not happen, but somehow + * Wev'e ended up with an objectId that has been reused but not yet + * deleted, and worse still it has changed type. Delete the old object. + */ + + yaffs_DeleteObject(in); + + in = 0; + } + + in = yaffs_FindOrCreateObjectByNumber(dev, + tags. + objectId, + oh->type); + + if (!in) + alloc_failed = 1; + + if (in && oh->shadowsObject > 0) { + + struct yaffs_ShadowFixerStruct *fixer; + fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct)); + if (fixer) { + fixer->next = shadowFixerList; + shadowFixerList = fixer; + fixer->objectId = tags.objectId; + fixer->shadowedId = oh->shadowsObject; + } + + } + + if (in && in->valid) { + /* We have already filled this one. We have a duplicate and need to resolve it. */ + + unsigned existingSerial = in->serial; + unsigned newSerial = tags.serialNumber; + + if (((existingSerial + 1) & 3) == newSerial) { + /* Use new one - destroy the exisiting one */ + yaffs_DeleteChunk(dev, + in->hdrChunk, + 1, __LINE__); + in->valid = 0; + } else { + /* Use existing - destroy this one. 
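The duplicate-header resolution just above hinges on the YAFFS1 two-bit serial number: when an object header is rewritten, its serial is the old one plus one, modulo four, so the copy whose serial equals (existing + 1) & 3 is the newer one. A small self-contained check of that rule (the helper name is illustrative, not part of the yaffs API):

	#include <assert.h>

	/*
	 * YAFFS1-style 2-bit serial comparison: the chunk whose serial is
	 * (other + 1) mod 4 is the newer copy of the same object header.
	 */
	static int serial_is_newer(unsigned existing, unsigned candidate)
	{
		return ((existing + 1) & 3) == (candidate & 3);
	}

	int main(void)
	{
		assert(serial_is_newer(0, 1));	/* 1 follows 0: keep the candidate  */
		assert(serial_is_newer(3, 0));	/* serial wraps around from 3 to 0  */
		assert(!serial_is_newer(2, 1));	/* 1 is older than 2: keep existing */
		return 0;
	}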
*/ + yaffs_DeleteChunk(dev, chunk, 1, + __LINE__); + } + } + + if (in && !in->valid && + (tags.objectId == YAFFS_OBJECTID_ROOT || + tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) { + /* We only load some info, don't fiddle with directory structure */ + in->valid = 1; + in->variantType = oh->type; + + in->yst_mode = oh->yst_mode; +#ifdef CONFIG_YAFFS_WINCE + in->win_atime[0] = oh->win_atime[0]; + in->win_ctime[0] = oh->win_ctime[0]; + in->win_mtime[0] = oh->win_mtime[0]; + in->win_atime[1] = oh->win_atime[1]; + in->win_ctime[1] = oh->win_ctime[1]; + in->win_mtime[1] = oh->win_mtime[1]; +#else + in->yst_uid = oh->yst_uid; + in->yst_gid = oh->yst_gid; + in->yst_atime = oh->yst_atime; + in->yst_mtime = oh->yst_mtime; + in->yst_ctime = oh->yst_ctime; + in->yst_rdev = oh->yst_rdev; +#endif + in->hdrChunk = chunk; + in->serial = tags.serialNumber; + + } else if (in && !in->valid) { + /* we need to load this info */ + + in->valid = 1; + in->variantType = oh->type; + + in->yst_mode = oh->yst_mode; +#ifdef CONFIG_YAFFS_WINCE + in->win_atime[0] = oh->win_atime[0]; + in->win_ctime[0] = oh->win_ctime[0]; + in->win_mtime[0] = oh->win_mtime[0]; + in->win_atime[1] = oh->win_atime[1]; + in->win_ctime[1] = oh->win_ctime[1]; + in->win_mtime[1] = oh->win_mtime[1]; +#else + in->yst_uid = oh->yst_uid; + in->yst_gid = oh->yst_gid; + in->yst_atime = oh->yst_atime; + in->yst_mtime = oh->yst_mtime; + in->yst_ctime = oh->yst_ctime; + in->yst_rdev = oh->yst_rdev; +#endif + in->hdrChunk = chunk; + in->serial = tags.serialNumber; + + yaffs_SetObjectName(in, oh->name); + in->dirty = 0; + + /* directory stuff... + * hook up to parent + */ + + parent = + yaffs_FindOrCreateObjectByNumber + (dev, oh->parentObjectId, + YAFFS_OBJECT_TYPE_DIRECTORY); + if (!parent) + alloc_failed = 1; + if (parent && parent->variantType == + YAFFS_OBJECT_TYPE_UNKNOWN) { + /* Set up as a directory */ + parent->variantType = + YAFFS_OBJECT_TYPE_DIRECTORY; + YINIT_LIST_HEAD(&parent->variant. + directoryVariant. + children); + } else if (!parent || parent->variantType != + YAFFS_OBJECT_TYPE_DIRECTORY) { + /* Hoosterman, another problem.... + * We're trying to use a non-directory as a directory + */ + + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." + TENDSTR))); + parent = dev->lostNFoundDir; + } + + yaffs_AddObjectToDirectory(parent, in); + + if (0 && (parent == dev->deletedDir || + parent == dev->unlinkedDir)) { + in->deleted = 1; /* If it is unlinked at start up then it wants deleting */ + dev->nDeletedFiles++; + } + /* Note re hardlinks. + * Since we might scan a hardlink before its equivalent object is scanned + * we put them all in a list. + * After scanning is complete, we should have all the objects, so we run through this + * list and fix up all the chains. + */ + + switch (in->variantType) { + case YAFFS_OBJECT_TYPE_UNKNOWN: + /* Todo got a problem */ + break; + case YAFFS_OBJECT_TYPE_FILE: + if (dev->param.useHeaderFileSize) + + in->variant.fileVariant. + fileSize = + oh->fileSize; + + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + in->variant.hardLinkVariant. 
+ equivalentObjectId = + oh->equivalentObjectId; + in->hardLinks.next = + (struct ylist_head *) + hardList; + hardList = in; + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + in->variant.symLinkVariant.alias = + yaffs_CloneString(oh->alias); + if (!in->variant.symLinkVariant.alias) + alloc_failed = 1; + break; + } + + } + } + } + + if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) { + /* If we got this far while scanning, then the block is fully allocated.*/ + state = YAFFS_BLOCK_STATE_FULL; + } + + if (state == YAFFS_BLOCK_STATE_ALLOCATING) { + /* If the block was partially allocated then treat it as fully allocated.*/ + state = YAFFS_BLOCK_STATE_FULL; + dev->allocationBlock = -1; + } + + bi->blockState = state; + + /* Now let's see if it was dirty */ + if (bi->pagesInUse == 0 && + !bi->hasShrinkHeader && + bi->blockState == YAFFS_BLOCK_STATE_FULL) { + yaffs_BlockBecameDirty(dev, blk); + } + + } + + + /* Ok, we've done all the scanning. + * Fix up the hard link chains. + * We should now have scanned all the objects, now it's time to add these + * hardlinks. + */ + + yaffs_HardlinkFixup(dev, hardList); + + /* Fix up any shadowed objects */ + { + struct yaffs_ShadowFixerStruct *fixer; + yaffs_Object *obj; + + while (shadowFixerList) { + fixer = shadowFixerList; + shadowFixerList = fixer->next; + /* Complete the rename transaction by deleting the shadowed object + * then setting the object header to unshadowed. + */ + obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId); + if (obj) + yaffs_DeleteObject(obj); + + obj = yaffs_FindObjectByNumber(dev, fixer->objectId); + + if (obj) + yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0); + + YFREE(fixer); + } + } + + yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__); + + if (alloc_failed) + return YAFFS_FAIL; + + T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR))); + + + return YAFFS_OK; +} + +static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in) +{ + __u8 *chunkData; + yaffs_ObjectHeader *oh; + yaffs_Device *dev; + yaffs_ExtendedTags tags; + int result; + int alloc_failed = 0; + + if (!in) + return; + + dev = in->myDev; + +#if 0 + T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR), + in->objectId, + in->lazyLoaded ? 
"not yet" : "already")); +#endif + + if (in->lazyLoaded && in->hdrChunk > 0) { + in->lazyLoaded = 0; + chunkData = yaffs_GetTempBuffer(dev, __LINE__); + + result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags); + oh = (yaffs_ObjectHeader *) chunkData; + + in->yst_mode = oh->yst_mode; +#ifdef CONFIG_YAFFS_WINCE + in->win_atime[0] = oh->win_atime[0]; + in->win_ctime[0] = oh->win_ctime[0]; + in->win_mtime[0] = oh->win_mtime[0]; + in->win_atime[1] = oh->win_atime[1]; + in->win_ctime[1] = oh->win_ctime[1]; + in->win_mtime[1] = oh->win_mtime[1]; +#else + in->yst_uid = oh->yst_uid; + in->yst_gid = oh->yst_gid; + in->yst_atime = oh->yst_atime; + in->yst_mtime = oh->yst_mtime; + in->yst_ctime = oh->yst_ctime; + in->yst_rdev = oh->yst_rdev; + +#endif + yaffs_SetObjectName(in, oh->name); + + if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) { + in->variant.symLinkVariant.alias = + yaffs_CloneString(oh->alias); + if (!in->variant.symLinkVariant.alias) + alloc_failed = 1; /* Not returned to caller */ + } + + yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__); + } +} + +static int yaffs_ScanBackwards(yaffs_Device *dev) +{ + yaffs_ExtendedTags tags; + int blk; + int blockIterator; + int startIterator; + int endIterator; + int nBlocksToScan = 0; + + int chunk; + int result; + int c; + int deleted; + yaffs_BlockState state; + yaffs_Object *hardList = NULL; + yaffs_BlockInfo *bi; + __u32 sequenceNumber; + yaffs_ObjectHeader *oh; + yaffs_Object *in; + yaffs_Object *parent; + int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1; + int itsUnlinked; + __u8 *chunkData; + + int fileSize; + int isShrink; + int foundChunksInBlock; + int equivalentObjectId; + int alloc_failed = 0; + + + yaffs_BlockIndex *blockIndex = NULL; + int altBlockIndex = 0; + + if (!dev->param.isYaffs2) { + T(YAFFS_TRACE_SCAN, + (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR))); + return YAFFS_FAIL; + } + + T(YAFFS_TRACE_SCAN, + (TSTR + ("yaffs_ScanBackwards starts intstartblk %d intendblk %d..." + TENDSTR), dev->internalStartBlock, dev->internalEndBlock)); + + + dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER; + + blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex)); + + if (!blockIndex) { + blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex)); + altBlockIndex = 1; + } + + if (!blockIndex) { + T(YAFFS_TRACE_SCAN, + (TSTR("yaffs_Scan() could not allocate block index!" 
TENDSTR))); + return YAFFS_FAIL; + } + + dev->blocksInCheckpoint = 0; + + chunkData = yaffs_GetTempBuffer(dev, __LINE__); + + /* Scan all the blocks to determine their state */ + bi = dev->blockInfo; + for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) { + yaffs_ClearChunkBits(dev, blk); + bi->pagesInUse = 0; + bi->softDeletions = 0; + + yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber); + + bi->blockState = state; + bi->sequenceNumber = sequenceNumber; + + if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) + bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT; + if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK) + bi->blockState = state = YAFFS_BLOCK_STATE_DEAD; + + T(YAFFS_TRACE_SCAN_DEBUG, + (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk, + state, sequenceNumber)); + + + if (state == YAFFS_BLOCK_STATE_CHECKPOINT) { + dev->blocksInCheckpoint++; + + } else if (state == YAFFS_BLOCK_STATE_DEAD) { + T(YAFFS_TRACE_BAD_BLOCKS, + (TSTR("block %d is bad" TENDSTR), blk)); + } else if (state == YAFFS_BLOCK_STATE_EMPTY) { + T(YAFFS_TRACE_SCAN_DEBUG, + (TSTR("Block empty " TENDSTR))); + dev->nErasedBlocks++; + dev->nFreeChunks += dev->param.nChunksPerBlock; + } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) { + + /* Determine the highest sequence number */ + if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER && + sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) { + + blockIndex[nBlocksToScan].seq = sequenceNumber; + blockIndex[nBlocksToScan].block = blk; + + nBlocksToScan++; + + if (sequenceNumber >= dev->sequenceNumber) + dev->sequenceNumber = sequenceNumber; + } else { + /* TODO: Nasty sequence number! */ + T(YAFFS_TRACE_SCAN, + (TSTR + ("Block scanning block %d has bad sequence number %d" + TENDSTR), blk, sequenceNumber)); + + } + } + bi++; + } + + T(YAFFS_TRACE_SCAN, + (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan)); + + + + YYIELD(); + + /* Sort the blocks */ +#ifndef CONFIG_YAFFS_USE_OWN_SORT + { + /* Use qsort now. */ + yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp); + } +#else + { + /* Dungy old bubble sort... */ + + yaffs_BlockIndex temp; + int i; + int j; + + for (i = 0; i < nBlocksToScan; i++) + for (j = i + 1; j < nBlocksToScan; j++) + if (blockIndex[i].seq > blockIndex[j].seq) { + temp = blockIndex[j]; + blockIndex[j] = blockIndex[i]; + blockIndex[i] = temp; + } + } +#endif + + YYIELD(); + + T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR))); + + /* Now scan the blocks looking at the data. */ + startIterator = 0; + endIterator = nBlocksToScan - 1; + T(YAFFS_TRACE_SCAN_DEBUG, + (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan)); + + /* For each block.... backwards */ + for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator; + blockIterator--) { + /* Cooperative multitasking! This loop can run for so + long that watchdog timers expire. */ + YYIELD(); + + /* get the block to scan in the correct order */ + blk = blockIndex[blockIterator].block; + + bi = yaffs_GetBlockInfo(dev, blk); + + + state = bi->blockState; + + deleted = 0; + + /* For each chunk in each block that needs scanning.... */ + foundChunksInBlock = 0; + for (c = dev->param.nChunksPerBlock - 1; + !alloc_failed && c >= 0 && + (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING || + state == YAFFS_BLOCK_STATE_ALLOCATING); c--) { + /* Scan backwards... 
+ * Read the tags and decide what to do + */ + + chunk = blk * dev->param.nChunksPerBlock + c; + + result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL, + &tags); + + /* Let's have a good look at this chunk... */ + + if (!tags.chunkUsed) { + /* An unassigned chunk in the block. + * If there are used chunks after this one, then + * it is a chunk that was skipped due to failing the erased + * check. Just skip it so that it can be deleted. + * But, more typically, We get here when this is an unallocated + * chunk and his means that either the block is empty or + * this is the one being allocated from + */ + + if (foundChunksInBlock) { + /* This is a chunk that was skipped due to failing the erased check */ + } else if (c == 0) { + /* We're looking at the first chunk in the block so the block is unused */ + state = YAFFS_BLOCK_STATE_EMPTY; + dev->nErasedBlocks++; + } else { + if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING || + state == YAFFS_BLOCK_STATE_ALLOCATING) { + if (dev->sequenceNumber == bi->sequenceNumber) { + /* this is the block being allocated from */ + + T(YAFFS_TRACE_SCAN, + (TSTR + (" Allocating from %d %d" + TENDSTR), blk, c)); + + state = YAFFS_BLOCK_STATE_ALLOCATING; + dev->allocationBlock = blk; + dev->allocationPage = c; + dev->allocationBlockFinder = blk; + } else { + /* This is a partially written block that is not + * the current allocation block. + */ + + T(YAFFS_TRACE_SCAN, + (TSTR("Partially written block %d detected" TENDSTR), + blk)); + } + } + } + + dev->nFreeChunks++; + + } else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) { + T(YAFFS_TRACE_SCAN, + (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR), + blk, c)); + + dev->nFreeChunks++; + + } else if (tags.chunkId > 0) { + /* chunkId > 0 so it is a data chunk... */ + unsigned int endpos; + __u32 chunkBase = + (tags.chunkId - 1) * dev->nDataBytesPerChunk; + + foundChunksInBlock = 1; + + + yaffs_SetChunkBit(dev, blk, c); + bi->pagesInUse++; + + in = yaffs_FindOrCreateObjectByNumber(dev, + tags. + objectId, + YAFFS_OBJECT_TYPE_FILE); + if (!in) { + /* Out of memory */ + alloc_failed = 1; + } + + if (in && + in->variantType == YAFFS_OBJECT_TYPE_FILE + && chunkBase < in->variant.fileVariant.shrinkSize) { + /* This has not been invalidated by a resize */ + if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, -1)) { + alloc_failed = 1; + } + + /* File size is calculated by looking at the data chunks if we have not + * seen an object header yet. Stop this practice once we find an object header. + */ + endpos = chunkBase + tags.byteCount; + + if (!in->valid && /* have not got an object header yet */ + in->variant.fileVariant.scannedFileSize < endpos) { + in->variant.fileVariant.scannedFileSize = endpos; + in->variant.fileVariant.fileSize = endpos; + } + + } else if (in) { + /* This chunk has been invalidated by a resize, or a past file deletion + * so delete the chunk*/ + yaffs_DeleteChunk(dev, chunk, 1, __LINE__); + + } + } else { + /* chunkId == 0, so it is an ObjectHeader. 
+ * Thus, we read in the object header and make the object + */ + foundChunksInBlock = 1; + + yaffs_SetChunkBit(dev, blk, c); + bi->pagesInUse++; + + oh = NULL; + in = NULL; + + if (tags.extraHeaderInfoAvailable) { + in = yaffs_FindOrCreateObjectByNumber(dev, + tags.objectId, + tags.extraObjectType); + if (!in) + alloc_failed = 1; + } + + if (!in || + (!in->valid && dev->param.disableLazyLoad) || + tags.extraShadows || + (!in->valid && + (tags.objectId == YAFFS_OBJECTID_ROOT || + tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) { + + /* If we don't have valid info then we need to read the chunk + * TODO In future we can probably defer reading the chunk and + * living with invalid data until needed. + */ + + result = yaffs_ReadChunkWithTagsFromNAND(dev, + chunk, + chunkData, + NULL); + + oh = (yaffs_ObjectHeader *) chunkData; + + if (dev->param.inbandTags) { + /* Fix up the header if they got corrupted by inband tags */ + oh->shadowsObject = oh->inbandShadowsObject; + oh->isShrink = oh->inbandIsShrink; + } + + if (!in) { + in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type); + if (!in) + alloc_failed = 1; + } + + } + + if (!in) { + /* TODO Hoosterman we have a problem! */ + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs tragedy: Could not make object for object %d at chunk %d during scan" + TENDSTR), tags.objectId, chunk)); + continue; + } + + if (in->valid) { + /* We have already filled this one. + * We have a duplicate that will be discarded, but + * we first have to suck out resize info if it is a file. + */ + + if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) && + ((oh && + oh->type == YAFFS_OBJECT_TYPE_FILE) || + (tags.extraHeaderInfoAvailable && + tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) { + __u32 thisSize = + (oh) ? oh->fileSize : tags. + extraFileLength; + __u32 parentObjectId = + (oh) ? oh-> + parentObjectId : tags. + extraParentObjectId; + + + isShrink = + (oh) ? oh->isShrink : tags. + extraIsShrinkHeader; + + /* If it is deleted (unlinked at start also means deleted) + * we treat the file size as being zeroed at this point. + */ + if (parentObjectId == + YAFFS_OBJECTID_DELETED + || parentObjectId == + YAFFS_OBJECTID_UNLINKED) { + thisSize = 0; + isShrink = 1; + } + + if (isShrink && in->variant.fileVariant.shrinkSize > thisSize) + in->variant.fileVariant.shrinkSize = thisSize; + + if (isShrink) + bi->hasShrinkHeader = 1; + + } + /* Use existing - destroy this one. */ + yaffs_DeleteChunk(dev, chunk, 1, __LINE__); + + } + + if (!in->valid && in->variantType != + (oh ? oh->type : tags.extraObjectType)) + T(YAFFS_TRACE_ERROR, ( + TSTR("yaffs tragedy: Bad object type, " + TCONT("%d != %d, for object %d at chunk ") + TCONT("%d during scan") + TENDSTR), oh ? 
+ oh->type : tags.extraObjectType, + in->variantType, tags.objectId, + chunk)); + + if (!in->valid && + (tags.objectId == YAFFS_OBJECTID_ROOT || + tags.objectId == + YAFFS_OBJECTID_LOSTNFOUND)) { + /* We only load some info, don't fiddle with directory structure */ + in->valid = 1; + + if (oh) { + in->variantType = oh->type; + + in->yst_mode = oh->yst_mode; +#ifdef CONFIG_YAFFS_WINCE + in->win_atime[0] = oh->win_atime[0]; + in->win_ctime[0] = oh->win_ctime[0]; + in->win_mtime[0] = oh->win_mtime[0]; + in->win_atime[1] = oh->win_atime[1]; + in->win_ctime[1] = oh->win_ctime[1]; + in->win_mtime[1] = oh->win_mtime[1]; +#else + in->yst_uid = oh->yst_uid; + in->yst_gid = oh->yst_gid; + in->yst_atime = oh->yst_atime; + in->yst_mtime = oh->yst_mtime; + in->yst_ctime = oh->yst_ctime; + in->yst_rdev = oh->yst_rdev; + +#endif + } else { + in->variantType = tags.extraObjectType; + in->lazyLoaded = 1; + } + + in->hdrChunk = chunk; + + } else if (!in->valid) { + /* we need to load this info */ + + in->valid = 1; + in->hdrChunk = chunk; + + if (oh) { + in->variantType = oh->type; + + in->yst_mode = oh->yst_mode; +#ifdef CONFIG_YAFFS_WINCE + in->win_atime[0] = oh->win_atime[0]; + in->win_ctime[0] = oh->win_ctime[0]; + in->win_mtime[0] = oh->win_mtime[0]; + in->win_atime[1] = oh->win_atime[1]; + in->win_ctime[1] = oh->win_ctime[1]; + in->win_mtime[1] = oh->win_mtime[1]; +#else + in->yst_uid = oh->yst_uid; + in->yst_gid = oh->yst_gid; + in->yst_atime = oh->yst_atime; + in->yst_mtime = oh->yst_mtime; + in->yst_ctime = oh->yst_ctime; + in->yst_rdev = oh->yst_rdev; +#endif + + if (oh->shadowsObject > 0) + yaffs_HandleShadowedObject(dev, + oh-> + shadowsObject, + 1); + + + + yaffs_SetObjectName(in, oh->name); + parent = + yaffs_FindOrCreateObjectByNumber + (dev, oh->parentObjectId, + YAFFS_OBJECT_TYPE_DIRECTORY); + + fileSize = oh->fileSize; + isShrink = oh->isShrink; + equivalentObjectId = oh->equivalentObjectId; + + } else { + in->variantType = tags.extraObjectType; + parent = + yaffs_FindOrCreateObjectByNumber + (dev, tags.extraParentObjectId, + YAFFS_OBJECT_TYPE_DIRECTORY); + fileSize = tags.extraFileLength; + isShrink = tags.extraIsShrinkHeader; + equivalentObjectId = tags.extraEquivalentObjectId; + in->lazyLoaded = 1; + + } + in->dirty = 0; + + if (!parent) + alloc_failed = 1; + + /* directory stuff... + * hook up to parent + */ + + if (parent && parent->variantType == + YAFFS_OBJECT_TYPE_UNKNOWN) { + /* Set up as a directory */ + parent->variantType = + YAFFS_OBJECT_TYPE_DIRECTORY; + YINIT_LIST_HEAD(&parent->variant. + directoryVariant. + children); + } else if (!parent || parent->variantType != + YAFFS_OBJECT_TYPE_DIRECTORY) { + /* Hoosterman, another problem.... + * We're trying to use a non-directory as a directory + */ + + T(YAFFS_TRACE_ERROR, + (TSTR + ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." + TENDSTR))); + parent = dev->lostNFoundDir; + } + + yaffs_AddObjectToDirectory(parent, in); + + itsUnlinked = (parent == dev->deletedDir) || + (parent == dev->unlinkedDir); + + if (isShrink) { + /* Mark the block as having a shrinkHeader */ + bi->hasShrinkHeader = 1; + } + + /* Note re hardlinks. + * Since we might scan a hardlink before its equivalent object is scanned + * we put them all in a list. + * After scanning is complete, we should have all the objects, so we run + * through this list and fix up all the chains. 
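In the switch that follows, the YAFFS_OBJECT_TYPE_FILE case reconciles three sizes: the length claimed by the newest object header, the highest data offset actually seen while scanning, and the ceiling imposed by any shrink header. A compact sketch of that reconciliation using a local struct (illustrative names, not the yaffs fileVariant fields):

	#include <stdio.h>

	struct file_sizes {
		unsigned header_size;	/* fileSize from the newest object header     */
		unsigned scanned_size;	/* highest end offset seen in data chunks     */
		unsigned shrink_size;	/* ceiling imposed by shrink headers, if any  */
	};

	/* Mirrors the FILE case of the backward-scan switch below. */
	static void reconcile(struct file_sizes *f)
	{
		if (f->scanned_size < f->header_size)
			f->scanned_size = f->header_size;	/* file was resized larger than its data */
		if (f->shrink_size > f->header_size)
			f->shrink_size = f->header_size;	/* clamp the shrink ceiling to the header size */
	}

	int main(void)
	{
		struct file_sizes f = {
			.header_size = 8192, .scanned_size = 4096, .shrink_size = 0xffffffff,
		};

		reconcile(&f);
		printf("size %u shrink %u\n", f.scanned_size, f.shrink_size);
		return 0;
	}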
+ */ + + switch (in->variantType) { + case YAFFS_OBJECT_TYPE_UNKNOWN: + /* Todo got a problem */ + break; + case YAFFS_OBJECT_TYPE_FILE: + + if (in->variant.fileVariant. + scannedFileSize < fileSize) { + /* This covers the case where the file size is greater + * than where the data is + * This will happen if the file is resized to be larger + * than its current data extents. + */ + in->variant.fileVariant.fileSize = fileSize; + in->variant.fileVariant.scannedFileSize = fileSize; + } + + if (in->variant.fileVariant.shrinkSize > fileSize) + in->variant.fileVariant.shrinkSize = fileSize; + + + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + if (!itsUnlinked) { + in->variant.hardLinkVariant.equivalentObjectId = + equivalentObjectId; + in->hardLinks.next = + (struct ylist_head *) hardList; + hardList = in; + } + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + /* Do nothing */ + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + if (oh) { + in->variant.symLinkVariant.alias = + yaffs_CloneString(oh->alias); + if (!in->variant.symLinkVariant.alias) + alloc_failed = 1; + } + break; + } + + } + + } + + } /* End of scanning for each chunk */ + + if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) { + /* If we got this far while scanning, then the block is fully allocated. */ + state = YAFFS_BLOCK_STATE_FULL; + } + + + bi->blockState = state; + + /* Now let's see if it was dirty */ + if (bi->pagesInUse == 0 && + !bi->hasShrinkHeader && + bi->blockState == YAFFS_BLOCK_STATE_FULL) { + yaffs_BlockBecameDirty(dev, blk); + } + + } + + yaffs_SkipRestOfBlock(dev); + + if (altBlockIndex) + YFREE_ALT(blockIndex); + else + YFREE(blockIndex); + + /* Ok, we've done all the scanning. + * Fix up the hard link chains. + * We should now have scanned all the objects, now it's time to add these + * hardlinks. 
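yaffs_HardlinkFixup(), called immediately below, resolves the hard links that were only noted by object id during the scan: each link was threaded onto a temporary singly linked list, and once every object exists the list is walked and the equivalent-object pointers are filled in. A simplified standalone model of that two-pass idea (minimal local types rather than the yaffs structures):

	#include <stdio.h>
	#include <stddef.h>

	struct obj {
		int id;
		int equiv_id;		/* recorded during the scan for hard links */
		struct obj *equiv;	/* resolved after the scan completes       */
		struct obj *next_hard;	/* temporary list built while scanning     */
	};

	/* Linear lookup stands in for yaffs_FindObjectByNumber(). */
	static struct obj *find_by_id(struct obj *all, size_t n, int id)
	{
		for (size_t i = 0; i < n; i++)
			if (all[i].id == id)
				return &all[i];
		return NULL;
	}

	/* Second pass: resolve every deferred hard link once all objects exist. */
	static void fixup_hardlinks(struct obj *hard_list, struct obj *all, size_t n)
	{
		while (hard_list) {
			struct obj *hl = hard_list;

			hard_list = hl->next_hard;
			hl->equiv = find_by_id(all, n, hl->equiv_id);
		}
	}

	int main(void)
	{
		struct obj objs[] = {
			{ .id = 10 },
			{ .id = 11, .equiv_id = 10 },	/* hard link scanned before its target */
		};
		struct obj *hard_list = &objs[1];	/* threaded during the scan */

		fixup_hardlinks(hard_list, objs, 2);
		printf("link %d -> object %d\n", objs[1].id,
		       objs[1].equiv ? objs[1].equiv->id : -1);
		return 0;
	}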
+ */ + yaffs_HardlinkFixup(dev, hardList); + + + yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__); + + if (alloc_failed) + return YAFFS_FAIL; + + T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR))); + + return YAFFS_OK; +} + +/*------------------------------ Directory Functions ----------------------------- */ + +static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj) +{ + struct ylist_head *lh; + yaffs_Object *listObj; + + int count = 0; + + if (!obj) { + T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR))); + YBUG(); + return; + } + + if (yaffs_SkipVerification(obj->myDev)) + return; + + if (!obj->parent) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR))); + YBUG(); + return; + } + + if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR))); + YBUG(); + } + + /* Iterate through the objects in each hash entry */ + + ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) { + if (lh) { + listObj = ylist_entry(lh, yaffs_Object, siblings); + yaffs_VerifyObject(listObj); + if (obj == listObj) + count++; + } + } + + if (count != 1) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count)); + YBUG(); + } +} + +static void yaffs_VerifyDirectory(yaffs_Object *directory) +{ + struct ylist_head *lh; + yaffs_Object *listObj; + + if (!directory) { + YBUG(); + return; + } + + if (yaffs_SkipFullVerification(directory->myDev)) + return; + + if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType)); + YBUG(); + } + + /* Iterate through the objects in each hash entry */ + + ylist_for_each(lh, &directory->variant.directoryVariant.children) { + if (lh) { + listObj = ylist_entry(lh, yaffs_Object, siblings); + if (listObj->parent != directory) { + T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent)); + YBUG(); + } + yaffs_VerifyObjectInDirectory(listObj); + } + } +} + +/* + *yaffs_UpdateParent() handles fixing a directories mtime and ctime when a new + * link (ie. name) is created or deleted in the directory. + * + * ie. + * create dir/a : update dir's mtime/ctime + * rm dir/a: update dir's mtime/ctime + * modify dir/a: don't update dir's mtimme/ctime + * + * This can be handled immediately or defered. Defering helps reduce the number + * of updates when many files in a directory are changed within a brief period. + * + * If the directory updating is defered then yaffs_UpdateDirtyDirecories must be + * called periodically. 
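yaffs_UpdateParent() below implements exactly that choice: either write the parent directory's object header immediately, or, when deferDirectoryUpdate is set, queue the directory on dev->dirtyDirectories and let yaffs_UpdateDirtyDirectories() flush the queue later. A minimal sketch of the defer-and-flush pattern, using a plain singly linked list instead of the ylist macros (names are illustrative):

	#include <stdio.h>
	#include <time.h>

	struct dir {
		const char *name;
		int dirty;
		int queued;
		time_t mtime;
		struct dir *next_dirty;
	};

	static struct dir *dirty_list;	/* stand-in for dev->dirtyDirectories */

	/* Called whenever a name is created or removed inside 'd'. */
	static void touch_parent(struct dir *d, int defer)
	{
		d->dirty = 1;
		d->mtime = time(NULL);

		if (!defer) {
			printf("write header for %s\n", d->name);	/* immediate update */
			d->dirty = 0;
		} else if (!d->queued) {
			d->queued = 1;		/* queue once; later calls just re-dirty it */
			d->next_dirty = dirty_list;
			dirty_list = d;
		}
	}

	/* Periodic flush, in the spirit of yaffs_UpdateDirtyDirectories(). */
	static void flush_dirty_dirs(void)
	{
		while (dirty_list) {
			struct dir *d = dirty_list;

			dirty_list = d->next_dirty;
			d->next_dirty = NULL;
			d->queued = 0;
			if (d->dirty) {
				printf("write header for %s\n", d->name);
				d->dirty = 0;
			}
		}
	}

	int main(void)
	{
		struct dir home = { .name = "home" };

		touch_parent(&home, 1);	/* create home/a                   */
		touch_parent(&home, 1);	/* create home/b, no second queue  */
		flush_dirty_dirs();	/* one header write covers both    */
		return 0;
	}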
+ */ + +static void yaffs_UpdateParent(yaffs_Object *obj) +{ + yaffs_Device *dev; + if(!obj) + return; + + dev = obj->myDev; + obj->dirty = 1; + obj->yst_mtime = obj->yst_ctime = Y_CURRENT_TIME; + if(dev->param.deferDirectoryUpdate){ + struct ylist_head *link = &obj->variant.directoryVariant.dirty; + + if(ylist_empty(link)){ + ylist_add(link,&dev->dirtyDirectories); + T(YAFFS_TRACE_BACKGROUND, (TSTR("Added object %d to dirty directories" TENDSTR),obj->objectId)); + } + + } else + yaffs_UpdateObjectHeader(obj,NULL,0,0,0); +} + +void yaffs_UpdateDirtyDirectories(yaffs_Device *dev) +{ + struct ylist_head *link; + yaffs_Object *obj; + yaffs_DirectoryStructure *dS; + yaffs_ObjectVariant *oV; + + T(YAFFS_TRACE_BACKGROUND, (TSTR("Update dirty directories" TENDSTR))); + + while(!ylist_empty(&dev->dirtyDirectories)){ + link = dev->dirtyDirectories.next; + ylist_del_init(link); + + dS=ylist_entry(link,yaffs_DirectoryStructure,dirty); + oV = ylist_entry(dS,yaffs_ObjectVariant,directoryVariant); + obj = ylist_entry(oV,yaffs_Object,variant); + + T(YAFFS_TRACE_BACKGROUND, (TSTR("Update directory %d" TENDSTR), obj->objectId)); + + if(obj->dirty) + yaffs_UpdateObjectHeader(obj,NULL,0,0,0); + } +} + +static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj) +{ + yaffs_Device *dev = obj->myDev; + yaffs_Object *parent; + + yaffs_VerifyObjectInDirectory(obj); + parent = obj->parent; + + yaffs_VerifyDirectory(parent); + + if (dev && dev->param.removeObjectCallback) + dev->param.removeObjectCallback(obj); + + + ylist_del_init(&obj->siblings); + obj->parent = NULL; + + yaffs_VerifyDirectory(parent); +} + +static void yaffs_AddObjectToDirectory(yaffs_Object *directory, + yaffs_Object *obj) +{ + if (!directory) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: Trying to add an object to a null pointer directory" + TENDSTR))); + YBUG(); + return; + } + if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: Trying to add an object to a non-directory" + TENDSTR))); + YBUG(); + } + + if (obj->siblings.prev == NULL) { + /* Not initialised */ + YBUG(); + } + + + yaffs_VerifyDirectory(directory); + + yaffs_RemoveObjectFromDirectory(obj); + + + /* Now add it */ + ylist_add(&obj->siblings, &directory->variant.directoryVariant.children); + obj->parent = directory; + + if (directory == obj->myDev->unlinkedDir + || directory == obj->myDev->deletedDir) { + obj->unlinked = 1; + obj->myDev->nUnlinkedFiles++; + obj->renameAllowed = 0; + } + + yaffs_VerifyDirectory(directory); + yaffs_VerifyObjectInDirectory(obj); +} + +yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory, + const YCHAR *name) +{ + int sum; + + struct ylist_head *i; + YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1]; + + yaffs_Object *l; + + if (!name) + return NULL; + + if (!directory) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: yaffs_FindObjectByName: null pointer directory" + TENDSTR))); + YBUG(); + return NULL; + } + if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR))); + YBUG(); + } + + sum = yaffs_CalcNameSum(name); + + ylist_for_each(i, &directory->variant.directoryVariant.children) { + if (i) { + l = ylist_entry(i, yaffs_Object, siblings); + + if (l->parent != directory) + YBUG(); + + yaffs_CheckObjectDetailsLoaded(l); + + /* Special case for lost-n-found */ + if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) { + if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0) + return l; + } else if 
(yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) { + /* LostnFound chunk called Objxxx + * Do a real check + */ + yaffs_GetObjectName(l, buffer, + YAFFS_MAX_NAME_LENGTH + 1); + if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0) + return l; + } + } + } + + return NULL; +} + + +#if 0 +int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir, + int (*fn) (yaffs_Object *)) +{ + struct ylist_head *i; + yaffs_Object *l; + + if (!theDir) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: yaffs_FindObjectByName: null pointer directory" + TENDSTR))); + YBUG(); + return YAFFS_FAIL; + } + if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR))); + YBUG(); + return YAFFS_FAIL; + } + + ylist_for_each(i, &theDir->variant.directoryVariant.children) { + if (i) { + l = ylist_entry(i, yaffs_Object, siblings); + if (l && !fn(l)) + return YAFFS_FAIL; + } + } + + return YAFFS_OK; + +} +#endif + +/* GetEquivalentObject dereferences any hard links to get to the + * actual object. + */ + +yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj) +{ + if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) { + /* We want the object id of the equivalent object, not this one */ + obj = obj->variant.hardLinkVariant.equivalentObject; + yaffs_CheckObjectDetailsLoaded(obj); + } + return obj; +} + +int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize) +{ + memset(name, 0, buffSize * sizeof(YCHAR)); + + yaffs_CheckObjectDetailsLoaded(obj); + + if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) { + yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1); + } else if (obj->hdrChunk <= 0) { + YCHAR locName[20]; + YCHAR numString[20]; + YCHAR *x = &numString[19]; + unsigned v = obj->objectId; + numString[19] = 0; + while (v > 0) { + x--; + *x = '0' + (v % 10); + v /= 10; + } + /* make up a name */ + yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX); + yaffs_strcat(locName, x); + yaffs_strncpy(name, locName, buffSize - 1); + + } +#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM + else if (obj->shortName[0]) + yaffs_strncpy(name, obj->shortName,YAFFS_SHORT_NAME_LENGTH+1); +#endif + else { + int result; + __u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__); + + yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer; + + memset(buffer, 0, obj->myDev->nDataBytesPerChunk); + + if (obj->hdrChunk > 0) { + result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev, + obj->hdrChunk, buffer, + NULL); + } + yaffs_strncpy(name, oh->name, buffSize - 1); + name[buffSize-1]=0; + + yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__); + } + + return yaffs_strnlen(name,buffSize-1); +} + +int yaffs_GetObjectFileLength(yaffs_Object *obj) +{ + /* Dereference any hard linking */ + obj = yaffs_GetEquivalentObject(obj); + + if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) + return obj->variant.fileVariant.fileSize; + if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK){ + if(!obj->variant.symLinkVariant.alias) + return 0; + return yaffs_strnlen(obj->variant.symLinkVariant.alias,YAFFS_MAX_ALIAS_LENGTH); + } else { + /* Only a directory should drop through to here */ + return obj->myDev->nDataBytesPerChunk; + } +} + +int yaffs_GetObjectLinkCount(yaffs_Object *obj) +{ + int count = 0; + struct ylist_head *i; + + if (!obj->unlinked) + count++; /* the object itself */ + + ylist_for_each(i, &obj->hardLinks) + count++; /* add the hard links; */ + + return count; +} + +int yaffs_GetObjectInode(yaffs_Object *obj) +{ + obj = yaffs_GetEquivalentObject(obj); 
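yaffs_FindObjectByName() above avoids fetching every child's full name: each object caches a cheap checksum of its name, and only entries whose cached sum matches yaffs_CalcNameSum(name) (or that have no header chunk) pay for yaffs_GetObjectName() plus a string compare. A sketch of that filtering idea with a simple additive sum (this is not the real yaffs_CalcNameSum(); it only illustrates the shortcut):

	#include <stdio.h>
	#include <string.h>

	/* Cheap name checksum; illustrative only. */
	static unsigned name_sum(const char *name)
	{
		unsigned sum = 0;

		while (*name)
			sum += (unsigned char)*name++;
		return sum & 0xffff;
	}

	struct entry {
		unsigned sum;		/* cached when the name was last written        */
		const char *name;	/* in the real code this may require a NAND read */
	};

	static const struct entry *find_by_name(const struct entry *e, size_t n,
						const char *name)
	{
		unsigned sum = name_sum(name);

		for (size_t i = 0; i < n; i++) {
			if (e[i].sum != sum)
				continue;		/* cheap reject, no name fetch     */
			if (strcmp(e[i].name, name) == 0)
				return &e[i];		/* sums match: do the real check   */
		}
		return NULL;
	}

	int main(void)
	{
		struct entry dir[] = {
			{ name_sum("boot.img"), "boot.img" },
			{ name_sum("system.img"), "system.img" },
		};
		const struct entry *hit = find_by_name(dir, 2, "system.img");

		printf("%s\n", hit ? hit->name : "not found");
		return 0;
	}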
+ + return obj->objectId; +} + +unsigned yaffs_GetObjectType(yaffs_Object *obj) +{ + obj = yaffs_GetEquivalentObject(obj); + + switch (obj->variantType) { + case YAFFS_OBJECT_TYPE_FILE: + return DT_REG; + break; + case YAFFS_OBJECT_TYPE_DIRECTORY: + return DT_DIR; + break; + case YAFFS_OBJECT_TYPE_SYMLINK: + return DT_LNK; + break; + case YAFFS_OBJECT_TYPE_HARDLINK: + return DT_REG; + break; + case YAFFS_OBJECT_TYPE_SPECIAL: + if (S_ISFIFO(obj->yst_mode)) + return DT_FIFO; + if (S_ISCHR(obj->yst_mode)) + return DT_CHR; + if (S_ISBLK(obj->yst_mode)) + return DT_BLK; + if (S_ISSOCK(obj->yst_mode)) + return DT_SOCK; + default: + return DT_REG; + break; + } +} + +YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj) +{ + obj = yaffs_GetEquivalentObject(obj); + if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) + return yaffs_CloneString(obj->variant.symLinkVariant.alias); + else + return yaffs_CloneString(_Y("")); +} + +#ifndef CONFIG_YAFFS_WINCE + +int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr) +{ + unsigned int valid = attr->ia_valid; + + if (valid & ATTR_MODE) + obj->yst_mode = attr->ia_mode; + if (valid & ATTR_UID) + obj->yst_uid = attr->ia_uid; + if (valid & ATTR_GID) + obj->yst_gid = attr->ia_gid; + + if (valid & ATTR_ATIME) + obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime); + if (valid & ATTR_CTIME) + obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime); + if (valid & ATTR_MTIME) + obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime); + + if (valid & ATTR_SIZE) + yaffs_ResizeFile(obj, attr->ia_size); + + yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0); + + return YAFFS_OK; + +} +int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr) +{ + unsigned int valid = 0; + + attr->ia_mode = obj->yst_mode; + valid |= ATTR_MODE; + attr->ia_uid = obj->yst_uid; + valid |= ATTR_UID; + attr->ia_gid = obj->yst_gid; + valid |= ATTR_GID; + + Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime; + valid |= ATTR_ATIME; + Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime; + valid |= ATTR_CTIME; + Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime; + valid |= ATTR_MTIME; + + attr->ia_size = yaffs_GetFileSize(obj); + valid |= ATTR_SIZE; + + attr->ia_valid = valid; + + return YAFFS_OK; +} + +#endif + +#if 0 +int yaffs_DumpObject(yaffs_Object *obj) +{ + YCHAR name[257]; + + yaffs_GetObjectName(obj, name, YAFFS_MAX_NAME_LENGTH + 1); + + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d" + " chunk %d type %d size %d\n" + TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name, + obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk, + yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj))); + + return YAFFS_OK; +} +#endif + +/*---------------------------- Initialisation code -------------------------------------- */ + +static int yaffs_CheckDevFunctions(const yaffs_Device *dev) +{ + + /* Common functions, gotta have */ + if (!dev->param.eraseBlockInNAND || !dev->param.initialiseNAND) + return 0; + +#ifdef CONFIG_YAFFS_YAFFS2 + + /* Can use the "with tags" style interface for yaffs1 or yaffs2 */ + if (dev->param.writeChunkWithTagsToNAND && + dev->param.readChunkWithTagsFromNAND && + !dev->param.writeChunkToNAND && + !dev->param.readChunkFromNAND && + dev->param.markNANDBlockBad && + dev->param.queryNANDBlock) + return 1; +#endif + + /* Can use the "spare" style interface for yaffs1 */ + if (!dev->param.isYaffs2 && + !dev->param.writeChunkWithTagsToNAND && + !dev->param.readChunkWithTagsFromNAND && + dev->param.writeChunkToNAND && + dev->param.readChunkFromNAND 
&& + !dev->param.markNANDBlockBad && + !dev->param.queryNANDBlock) + return 1; + + return 0; /* bad */ +} + + +static int yaffs_CreateInitialDirectories(yaffs_Device *dev) +{ + /* Initialise the unlinked, deleted, root and lost and found directories */ + + dev->lostNFoundDir = dev->rootDir = NULL; + dev->unlinkedDir = dev->deletedDir = NULL; + + dev->unlinkedDir = + yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR); + + dev->deletedDir = + yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR); + + dev->rootDir = + yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT, + YAFFS_ROOT_MODE | S_IFDIR); + dev->lostNFoundDir = + yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND, + YAFFS_LOSTNFOUND_MODE | S_IFDIR); + + if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) { + yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir); + return YAFFS_OK; + } + + return YAFFS_FAIL; +} + +int yaffs_GutsInitialise(yaffs_Device *dev) +{ + int init_failed = 0; + unsigned x; + int bits; + + T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR))); + + /* Check stuff that must be set */ + + if (!dev) { + T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Need a device" TENDSTR))); + return YAFFS_FAIL; + } + + dev->internalStartBlock = dev->param.startBlock; + dev->internalEndBlock = dev->param.endBlock; + dev->blockOffset = 0; + dev->chunkOffset = 0; + dev->nFreeChunks = 0; + + dev->gcBlock = 0; + + if (dev->param.startBlock == 0) { + dev->internalStartBlock = dev->param.startBlock + 1; + dev->internalEndBlock = dev->param.endBlock + 1; + dev->blockOffset = 1; + dev->chunkOffset = dev->param.nChunksPerBlock; + } + + /* Check geometry parameters. */ + + if ((!dev->param.inbandTags && dev->param.isYaffs2 && dev->param.totalBytesPerChunk < 1024) || + (!dev->param.isYaffs2 && dev->param.totalBytesPerChunk < 512) || + (dev->param.inbandTags && !dev->param.isYaffs2) || + dev->param.nChunksPerBlock < 2 || + dev->param.nReservedBlocks < 2 || + dev->internalStartBlock <= 0 || + dev->internalEndBlock <= 0 || + dev->internalEndBlock <= (dev->internalStartBlock + dev->param.nReservedBlocks + 2)) { /* otherwise it is too small */ + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d " + TENDSTR), dev->param.totalBytesPerChunk, dev->param.isYaffs2 ? "2" : "", dev->param.inbandTags)); + return YAFFS_FAIL; + } + + if (yaffs_InitialiseNAND(dev) != YAFFS_OK) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: InitialiseNAND failed" TENDSTR))); + return YAFFS_FAIL; + } + + /* Sort out space for inband tags, if required */ + if (dev->param.inbandTags) + dev->nDataBytesPerChunk = dev->param.totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart); + else + dev->nDataBytesPerChunk = dev->param.totalBytesPerChunk; + + /* Got the right mix of functions? */ + if (!yaffs_CheckDevFunctions(dev)) { + /* Function missing */ + T(YAFFS_TRACE_ALWAYS, + (TSTR + ("yaffs: device function(s) missing or wrong\n" TENDSTR))); + + return YAFFS_FAIL; + } + + /* This is really a compilation check. */ + if (!yaffs_CheckStructures()) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs_CheckStructures failed\n" TENDSTR))); + return YAFFS_FAIL; + } + + if (dev->isMounted) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: device already mounted\n" TENDSTR))); + return YAFFS_FAIL; + } + + /* Finished with most checks. One or two more checks happen later on too. 
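The inband-tags adjustment above shrinks the usable payload of each chunk by the size of the packed tags record that now lives in the data area rather than the OOB spare. A hedged arithmetic sketch; the four-word tag layout below is an assumption standing in for yaffs_PackedTags2TagsPart, and 2048-byte chunks are just an example:

	#include <stdio.h>

	/*
	 * Assumed layout of the tags stored inband; mirrors the idea of
	 * yaffs_PackedTags2TagsPart (four 32-bit words) but is only illustrative.
	 */
	struct packed_tags_part {
		unsigned sequenceNumber;
		unsigned objectId;
		unsigned chunkId;
		unsigned byteCount;
	};

	int main(void)
	{
		unsigned total_bytes_per_chunk = 2048;	/* typical large-page NAND */
		int inband_tags = 1;
		unsigned data_bytes_per_chunk;

		if (inband_tags)
			data_bytes_per_chunk =
				total_bytes_per_chunk - sizeof(struct packed_tags_part);
		else
			data_bytes_per_chunk = total_bytes_per_chunk;

		printf("payload per chunk: %u bytes\n", data_bytes_per_chunk);
		return 0;
	}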
*/ + + dev->isMounted = 1; + + /* OK now calculate a few things for the device */ + + /* + * Calculate all the chunk size manipulation numbers: + */ + x = dev->nDataBytesPerChunk; + /* We always use dev->chunkShift and dev->chunkDiv */ + dev->chunkShift = Shifts(x); + x >>= dev->chunkShift; + dev->chunkDiv = x; + /* We only use chunk mask if chunkDiv is 1 */ + dev->chunkMask = (1<<dev->chunkShift) - 1; + + /* + * Calculate chunkGroupBits. + * We need to find the next power of 2 > than internalEndBlock + */ + + x = dev->param.nChunksPerBlock * (dev->internalEndBlock + 1); + + bits = ShiftsGE(x); + + /* Set up tnode width if wide tnodes are enabled. */ + if (!dev->param.wideTnodesDisabled) { + /* bits must be even so that we end up with 32-bit words */ + if (bits & 1) + bits++; + if (bits < 16) + dev->tnodeWidth = 16; + else + dev->tnodeWidth = bits; + } else + dev->tnodeWidth = 16; + + dev->tnodeMask = (1<<dev->tnodeWidth)-1; + + /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled), + * so if the bitwidth of the + * chunk range we're using is greater than 16 we need + * to figure out chunk shift and chunkGroupSize + */ + + if (bits <= dev->tnodeWidth) + dev->chunkGroupBits = 0; + else + dev->chunkGroupBits = bits - dev->tnodeWidth; + + + dev->chunkGroupSize = 1 << dev->chunkGroupBits; + + if (dev->param.nChunksPerBlock < dev->chunkGroupSize) { + /* We have a problem because the soft delete won't work if + * the chunk group size > chunks per block. + * This can be remedied by using larger "virtual blocks". + */ + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: chunk group too large\n" TENDSTR))); + + return YAFFS_FAIL; + } + + /* OK, we've finished verifying the device, lets continue with initialisation */ + + /* More device initialisation */ + dev->allGCs = 0; + dev->passiveGCs = 0; + dev->oldestDirtyGCs = 0; + dev->backgroundGCs = 0; + dev->gcBlockFinder = 0; + dev->bufferedBlock = -1; + dev->doingBufferedBlockRewrite = 0; + dev->nDeletedFiles = 0; + dev->nBackgroundDeletions = 0; + dev->nUnlinkedFiles = 0; + dev->eccFixed = 0; + dev->eccUnfixed = 0; + dev->tagsEccFixed = 0; + dev->tagsEccUnfixed = 0; + dev->nErasureFailures = 0; + dev->nErasedBlocks = 0; + dev->gcDisable= 0; + dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */ + YINIT_LIST_HEAD(&dev->dirtyDirectories); + dev->oldestDirtySequence = 0; + dev->oldestDirtyBlock = 0; + + /* Initialise temporary buffers and caches. 
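The tnode sizing just above reduces to: count the bits needed to address every chunk on the device, round up to an even tnode width of at least 16 bits, and let chunkGroupBits absorb whatever does not fit. A standalone sketch of the same arithmetic, with a local helper standing in for ShiftsGE():

	#include <stdio.h>

	/* Smallest n such that (1u << n) >= x; stands in for ShiftsGE(). */
	static int shifts_ge(unsigned x)
	{
		int n = 0;

		while ((1u << n) < x && n < 31)
			n++;
		return n;
	}

	int main(void)
	{
		unsigned chunks_per_block = 64;		/* typical 2k-page NAND */
		unsigned internal_end_block = 4095;	/* 4096 blocks          */
		int wide_tnodes = 1;

		unsigned total_chunks = chunks_per_block * (internal_end_block + 1);
		int bits = shifts_ge(total_chunks);
		int tnode_width;
		int chunk_group_bits;

		if (wide_tnodes) {
			if (bits & 1)		/* keep tnode arrays packing into 32-bit words */
				bits++;
			tnode_width = bits < 16 ? 16 : bits;
		} else {
			tnode_width = 16;
		}

		chunk_group_bits = bits <= tnode_width ? 0 : bits - tnode_width;

		printf("chunks=%u bits=%d tnodeWidth=%d chunkGroupBits=%d chunkGroupSize=%d\n",
		       total_chunks, bits, tnode_width, chunk_group_bits,
		       1 << chunk_group_bits);
		return 0;
	}

For this example geometry the 2^18 chunks fit within an 18-bit tnode, so chunkGroupBits stays 0 and soft deletion can work per individual chunk.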
*/ + if (!yaffs_InitialiseTempBuffers(dev)) + init_failed = 1; + + dev->srCache = NULL; + dev->gcCleanupList = NULL; + + + if (!init_failed && + dev->param.nShortOpCaches > 0) { + int i; + void *buf; + int srCacheBytes = dev->param.nShortOpCaches * sizeof(yaffs_ChunkCache); + + if (dev->param.nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES) + dev->param.nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES; + + dev->srCache = YMALLOC(srCacheBytes); + + buf = (__u8 *) dev->srCache; + + if (dev->srCache) + memset(dev->srCache, 0, srCacheBytes); + + for (i = 0; i < dev->param.nShortOpCaches && buf; i++) { + dev->srCache[i].object = NULL; + dev->srCache[i].lastUse = 0; + dev->srCache[i].dirty = 0; + dev->srCache[i].data = buf = YMALLOC_DMA(dev->param.totalBytesPerChunk); + } + if (!buf) + init_failed = 1; + + dev->srLastUse = 0; + } + + dev->cacheHits = 0; + + if (!init_failed) { + dev->gcCleanupList = YMALLOC(dev->param.nChunksPerBlock * sizeof(__u32)); + if (!dev->gcCleanupList) + init_failed = 1; + } + + if (dev->param.isYaffs2) + dev->param.useHeaderFileSize = 1; + + if (!init_failed && !yaffs_InitialiseBlocks(dev)) + init_failed = 1; + + yaffs_InitialiseTnodes(dev); + yaffs_InitialiseObjects(dev); + + if (!init_failed && !yaffs_CreateInitialDirectories(dev)) + init_failed = 1; + + + if (!init_failed) { + /* Now scan the flash. */ + if (dev->param.isYaffs2) { + if (yaffs_CheckpointRestore(dev)) { + yaffs_CheckObjectDetailsLoaded(dev->rootDir); + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: restored from checkpoint" TENDSTR))); + } else { + + /* Clean up the mess caused by an aborted checkpoint load + * and scan backwards. + */ + yaffs_DeinitialiseBlocks(dev); + yaffs_DeinitialiseTnodes(dev); + yaffs_DeinitialiseObjects(dev); + + + dev->nErasedBlocks = 0; + dev->nFreeChunks = 0; + dev->allocationBlock = -1; + dev->allocationPage = -1; + dev->nDeletedFiles = 0; + dev->nUnlinkedFiles = 0; + dev->nBackgroundDeletions = 0; + + if (!init_failed && !yaffs_InitialiseBlocks(dev)) + init_failed = 1; + + yaffs_InitialiseTnodes(dev); + yaffs_InitialiseObjects(dev); + + if (!init_failed && !yaffs_CreateInitialDirectories(dev)) + init_failed = 1; + + if (!init_failed && !yaffs_ScanBackwards(dev)) + init_failed = 1; + } + } else if (!yaffs_Scan(dev)) + init_failed = 1; + + yaffs_StripDeletedObjects(dev); + yaffs_FixHangingObjects(dev); + if(dev->param.emptyLostAndFound) + yaffs_EmptyLostAndFound(dev); + } + + if (init_failed) { + /* Clean up the mess */ + T(YAFFS_TRACE_TRACING, + (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR))); + + yaffs_Deinitialise(dev); + return YAFFS_FAIL; + } + + /* Zero out stats */ + dev->nPageReads = 0; + dev->nPageWrites = 0; + dev->nBlockErasures = 0; + dev->nGCCopies = 0; + dev->nRetriedWrites = 0; + + dev->nRetiredBlocks = 0; + + yaffs_VerifyFreeChunks(dev); + yaffs_VerifyBlocks(dev); + + /* Clean up any aborted checkpoint data */ + if(!dev->isCheckpointed && dev->blocksInCheckpoint > 0) + yaffs_InvalidateCheckpoint(dev); + + T(YAFFS_TRACE_TRACING, + (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR))); + return YAFFS_OK; + +} + +void yaffs_Deinitialise(yaffs_Device *dev) +{ + if (dev->isMounted) { + int i; + + yaffs_DeinitialiseBlocks(dev); + yaffs_DeinitialiseTnodes(dev); + yaffs_DeinitialiseObjects(dev); + if (dev->param.nShortOpCaches > 0 && + dev->srCache) { + + for (i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].data) + YFREE(dev->srCache[i].data); + dev->srCache[i].data = NULL; + } + + YFREE(dev->srCache); + dev->srCache = NULL; + } + + 
YFREE(dev->gcCleanupList); + + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) + YFREE(dev->tempBuffer[i].buffer); + + dev->isMounted = 0; + + if (dev->param.deinitialiseNAND) + dev->param.deinitialiseNAND(dev); + } +} + +static int yaffs_CountFreeChunks(yaffs_Device *dev) +{ + int nFree=0; + int b; + + yaffs_BlockInfo *blk; + + blk = dev->blockInfo; + for (b = dev->internalStartBlock; b <= dev->internalEndBlock; b++) { + switch (blk->blockState) { + case YAFFS_BLOCK_STATE_EMPTY: + case YAFFS_BLOCK_STATE_ALLOCATING: + case YAFFS_BLOCK_STATE_COLLECTING: + case YAFFS_BLOCK_STATE_FULL: + nFree += + (dev->param.nChunksPerBlock - blk->pagesInUse + + blk->softDeletions); + break; + default: + break; + } + blk++; + } + + return nFree; +} + +int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev) +{ + /* This is what we report to the outside world */ + + int nFree; + int nDirtyCacheChunks; + int blocksForCheckpoint; + int i; + +#if 1 + nFree = dev->nFreeChunks; +#else + nFree = yaffs_CountFreeChunks(dev); +#endif + + nFree += dev->nDeletedFiles; + + /* Now count the number of dirty chunks in the cache and subtract those */ + + for (nDirtyCacheChunks = 0, i = 0; i < dev->param.nShortOpCaches; i++) { + if (dev->srCache[i].dirty) + nDirtyCacheChunks++; + } + + nFree -= nDirtyCacheChunks; + + nFree -= ((dev->param.nReservedBlocks + 1) * dev->param.nChunksPerBlock); + + /* Now we figure out how much to reserve for the checkpoint and report that... */ + blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint; + if (blocksForCheckpoint < 0) + blocksForCheckpoint = 0; + + nFree -= (blocksForCheckpoint * dev->param.nChunksPerBlock); + + if (nFree < 0) + nFree = 0; + + return nFree; + +} + +static int yaffs_freeVerificationFailures; + +static void yaffs_VerifyFreeChunks(yaffs_Device *dev) +{ + int counted; + int difference; + + if (yaffs_SkipVerification(dev)) + return; + + counted = yaffs_CountFreeChunks(dev); + + difference = dev->nFreeChunks - counted; + + if (difference) { + T(YAFFS_TRACE_ALWAYS, + (TSTR("Freechunks verification failure %d %d %d" TENDSTR), + dev->nFreeChunks, counted, difference)); + yaffs_freeVerificationFailures++; + } +} + +/*---------------------------------------- YAFFS test code ----------------------*/ + +#define yaffs_CheckStruct(structure, syze, name) \ + do { \ + if (sizeof(structure) != syze) { \ + T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\ + name, syze, (int) sizeof(structure))); \ + return YAFFS_FAIL; \ + } \ + } while (0) + +static int yaffs_CheckStructures(void) +{ +/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */ +/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */ +/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */ +#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG +/* yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode"); */ +#endif +#ifndef CONFIG_YAFFS_WINCE + yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader"); +#endif + return YAFFS_OK; +} diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h new file mode 100644 index 000000000000..3647d6bce6cd --- /dev/null +++ b/fs/yaffs2/yaffs_guts.h @@ -0,0 +1,919 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. 
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_GUTS_H__ +#define __YAFFS_GUTS_H__ + +#include "devextras.h" +#include "yportenv.h" + +#define YAFFS_OK 1 +#define YAFFS_FAIL 0 + +/* Give us a Y=0x59, + * Give us an A=0x41, + * Give us an FF=0xFF + * Give us an S=0x53 + * And what have we got... + */ +#define YAFFS_MAGIC 0x5941FF53 + +#define YAFFS_NTNODES_LEVEL0 16 +#define YAFFS_TNODES_LEVEL0_BITS 4 +#define YAFFS_TNODES_LEVEL0_MASK 0xf + +#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2) +#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1) +#define YAFFS_TNODES_INTERNAL_MASK 0x7 +#define YAFFS_TNODES_MAX_LEVEL 6 + +#ifndef CONFIG_YAFFS_NO_YAFFS1 +#define YAFFS_BYTES_PER_SPARE 16 +#define YAFFS_BYTES_PER_CHUNK 512 +#define YAFFS_CHUNK_SIZE_SHIFT 9 +#define YAFFS_CHUNKS_PER_BLOCK 32 +#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK) +#endif + +#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024 +#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32 + +#define YAFFS_MAX_CHUNK_ID 0x000FFFFF + +#define YAFFS_UNUSED_OBJECT_ID 0x0003FFFF + +#define YAFFS_ALLOCATION_NOBJECTS 100 +#define YAFFS_ALLOCATION_NTNODES 100 +#define YAFFS_ALLOCATION_NLINKS 100 + +#define YAFFS_NOBJECT_BUCKETS 256 + + +#define YAFFS_OBJECT_SPACE 0x40000 + +#define YAFFS_CHECKPOINT_VERSION 4 + +#ifdef CONFIG_YAFFS_UNICODE +#define YAFFS_MAX_NAME_LENGTH 127 +#define YAFFS_MAX_ALIAS_LENGTH 79 +#else +#define YAFFS_MAX_NAME_LENGTH 255 +#define YAFFS_MAX_ALIAS_LENGTH 159 +#endif + +#define YAFFS_SHORT_NAME_LENGTH 15 + +/* Some special object ids for pseudo objects */ +#define YAFFS_OBJECTID_ROOT 1 +#define YAFFS_OBJECTID_LOSTNFOUND 2 +#define YAFFS_OBJECTID_UNLINKED 3 +#define YAFFS_OBJECTID_DELETED 4 + +/* Pseudo object ids for checkpointing */ +#define YAFFS_OBJECTID_SB_HEADER 0x10 +#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20 +#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21 + + +#define YAFFS_MAX_SHORT_OP_CACHES 20 + +#define YAFFS_N_TEMP_BUFFERS 6 + +/* We limit the number attempts at sucessfully saving a chunk of data. + * Small-page devices have 32 pages per block; large-page devices have 64. + * Default to something in the order of 5 to 10 blocks worth of chunks. + */ +#define YAFFS_WR_ATTEMPTS (5*64) + +/* Sequence numbers are used in YAFFS2 to determine block allocation order. + * The range is limited slightly to help distinguish bad numbers from good. + * This also allows us to perhaps in the future use special numbers for + * special purposes. + * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years, + * and is a larger number than the lifetime of a 2GB device. + */ +#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000 +#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00 + +/* Special sequence number for bad block that failed to be marked bad */ +#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000 + +/* ChunkCache is used for short read/write operations.*/ +typedef struct { + struct yaffs_ObjectStruct *object; + int chunkId; + int lastUse; + int dirty; + int nBytes; /* Only valid if the cache is dirty */ + int locked; /* Can't push out or flush while locked. 
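The sequence-number limits above come with a lifetime claim (8 blocks per second for roughly 15 years). A quick user-space back-of-the-envelope check of that arithmetic, using the two constants exactly as defined here:

#include <stdio.h>

int main(void)
{
    unsigned long long lowest  = 0x00001000ULL;  /* YAFFS_LOWEST_SEQUENCE_NUMBER  */
    unsigned long long highest = 0xEFFFFF00ULL;  /* YAFFS_HIGHEST_SEQUENCE_NUMBER */
    unsigned long long usable  = highest - lowest;  /* ~4.0e9 block allocations */
    double seconds = (double)usable / 8.0;          /* at 8 blocks per second   */
    double years   = seconds / (365.25 * 24 * 3600);

    /* Prints roughly 15.9 years, consistent with the comment above. */
    printf("%llu sequence numbers -> about %.1f years at 8 blocks/s\n",
           usable, years);
    return 0;
}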
*/ + __u8 *data; +} yaffs_ChunkCache; + + + +/* Tags structures in RAM + * NB This uses bitfield. Bitfields should not straddle a u32 boundary otherwise + * the structure size will get blown out. + */ + +#ifndef CONFIG_YAFFS_NO_YAFFS1 +typedef struct { + unsigned chunkId:20; + unsigned serialNumber:2; + unsigned byteCountLSB:10; + unsigned objectId:18; + unsigned ecc:12; + unsigned byteCountMSB:2; +} yaffs_Tags; + +typedef union { + yaffs_Tags asTags; + __u8 asBytes[8]; +} yaffs_TagsUnion; + +#endif + +/* Stuff used for extended tags in YAFFS2 */ + +typedef enum { + YAFFS_ECC_RESULT_UNKNOWN, + YAFFS_ECC_RESULT_NO_ERROR, + YAFFS_ECC_RESULT_FIXED, + YAFFS_ECC_RESULT_UNFIXED +} yaffs_ECCResult; + +typedef enum { + YAFFS_OBJECT_TYPE_UNKNOWN, + YAFFS_OBJECT_TYPE_FILE, + YAFFS_OBJECT_TYPE_SYMLINK, + YAFFS_OBJECT_TYPE_DIRECTORY, + YAFFS_OBJECT_TYPE_HARDLINK, + YAFFS_OBJECT_TYPE_SPECIAL +} yaffs_ObjectType; + +#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL + +typedef struct { + + unsigned validMarker0; + unsigned chunkUsed; /* Status of the chunk: used or unused */ + unsigned objectId; /* If 0 then this is not part of an object (unused) */ + unsigned chunkId; /* If 0 then this is a header, else a data chunk */ + unsigned byteCount; /* Only valid for data chunks */ + + /* The following stuff only has meaning when we read */ + yaffs_ECCResult eccResult; + unsigned blockBad; + + /* YAFFS 1 stuff */ + unsigned chunkDeleted; /* The chunk is marked deleted */ + unsigned serialNumber; /* Yaffs1 2-bit serial number */ + + /* YAFFS2 stuff */ + unsigned sequenceNumber; /* The sequence number of this block */ + + /* Extra info if this is an object header (YAFFS2 only) */ + + unsigned extraHeaderInfoAvailable; /* There is extra info available if this is not zero */ + unsigned extraParentObjectId; /* The parent object */ + unsigned extraIsShrinkHeader; /* Is it a shrink header? */ + unsigned extraShadows; /* Does this shadow another object? */ + + yaffs_ObjectType extraObjectType; /* What object type? */ + + unsigned extraFileLength; /* Length if it is a file */ + unsigned extraEquivalentObjectId; /* Equivalent object Id if it is a hard link */ + + unsigned validMarker1; + +} yaffs_ExtendedTags; + +/* Spare structure for YAFFS1 */ +typedef struct { + __u8 tagByte0; + __u8 tagByte1; + __u8 tagByte2; + __u8 tagByte3; + __u8 pageStatus; /* set to 0 to delete the chunk */ + __u8 blockStatus; + __u8 tagByte4; + __u8 tagByte5; + __u8 ecc1[3]; + __u8 tagByte6; + __u8 tagByte7; + __u8 ecc2[3]; +} yaffs_Spare; + +/*Special structure for passing through to mtd */ +struct yaffs_NANDSpare { + yaffs_Spare spare; + int eccres1; + int eccres2; +}; + +/* Block data in RAM */ + +typedef enum { + YAFFS_BLOCK_STATE_UNKNOWN = 0, + + YAFFS_BLOCK_STATE_SCANNING, + /* Being scanned */ + + YAFFS_BLOCK_STATE_NEEDS_SCANNING, + /* The block might have something on it (ie it is allocating or full, perhaps empty) + * but it needs to be scanned to determine its true state. + * This state is only valid during yaffs_Scan. + * NB We tolerate empty because the pre-scanner might be incapable of deciding + * However, if this state is returned on a YAFFS2 device, then we expect a sequence number + */ + + YAFFS_BLOCK_STATE_EMPTY, + /* This block is empty */ + + YAFFS_BLOCK_STATE_ALLOCATING, + /* This block is partially allocated. + * At least one page holds valid data. + * This is the one currently being used for page + * allocation. Should never be more than one of these. 
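The yaffs_Tags bitfield above is laid out so that neither group of fields straddles a 32-bit word, which is what keeps sizeof(yaffs_Tags) at the 8 bytes the asBytes[8] union arm expects. A compile-time check of that claim on a like-shaped struct (bitfield layout is ultimately implementation-defined, which is presumably why yaffs_CheckStructures() also verifies structure sizes at run time):

/* 20 + 2 + 10 = 32 bits and 18 + 12 + 2 = 32 bits: two full __u32 words. */
struct example_tags {
    unsigned chunk_id:20;
    unsigned serial_number:2;
    unsigned byte_count_lsb:10;
    unsigned object_id:18;
    unsigned ecc:12;
    unsigned byte_count_msb:2;
};

/* Fails to compile if a compiler pads this beyond 8 bytes. */
typedef char example_tags_size_check[sizeof(struct example_tags) == 8 ? 1 : -1];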
+ * If a block is only partially allocated at mount it is treated as full. + */ + + YAFFS_BLOCK_STATE_FULL, + /* All the pages in this block have been allocated. + * If a block was only partially allocated when mounted we treat + * it as fully allocated. + */ + + YAFFS_BLOCK_STATE_DIRTY, + /* The block was full and now all chunks have been deleted. + * Erase me, reuse me. + */ + + YAFFS_BLOCK_STATE_CHECKPOINT, + /* This block is assigned to holding checkpoint data. */ + + YAFFS_BLOCK_STATE_COLLECTING, + /* This block is being garbage collected */ + + YAFFS_BLOCK_STATE_DEAD + /* This block has failed and is not in use */ +} yaffs_BlockState; + +#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1) + + +typedef struct { + + int softDeletions:10; /* number of soft deleted pages */ + int pagesInUse:10; /* number of pages in use */ + unsigned blockState:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */ + __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */ + /* and retire the block. */ + __u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */ + __u32 gcPrioritise:1; /* An ECC check or blank check has failed on this block. + It should be prioritised for GC */ + __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */ + +#ifdef CONFIG_YAFFS_YAFFS2 + __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */ + __u32 sequenceNumber; /* block sequence number for yaffs2 */ +#endif + +} yaffs_BlockInfo; + +/* -------------------------- Object structure -------------------------------*/ +/* This is the object structure as stored on NAND */ + +typedef struct { + yaffs_ObjectType type; + + /* Apply to everything */ + int parentObjectId; + __u16 sum__NoLongerUsed; /* checksum of name. No longer used */ + YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; + + /* The following apply to directories, files, symlinks - not hard links */ + __u32 yst_mode; /* protection */ + +#ifdef CONFIG_YAFFS_WINCE + __u32 notForWinCE[5]; +#else + __u32 yst_uid; + __u32 yst_gid; + __u32 yst_atime; + __u32 yst_mtime; + __u32 yst_ctime; +#endif + + /* File size applies to files only */ + int fileSize; + + /* Equivalent object id applies to hard links only. */ + int equivalentObjectId; + + /* Alias is for symlinks only. 
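pagesInUse and softDeletions above are the two counters behind the free-space arithmetic in yaffs_CountFreeChunks() earlier in this patch: soft-deleted chunks are still counted in pagesInUse, so they are added back when working out how many chunks a block can still yield. A small sketch of that per-block formula (field names shortened for illustration):

struct block_counters {
    int soft_deletions;   /* chunks soft-deleted but not yet reclaimed */
    int pages_in_use;     /* chunks currently holding data             */
};

/* Matches the EMPTY/ALLOCATING/COLLECTING/FULL case in yaffs_CountFreeChunks(). */
static int free_chunks_in_block(const struct block_counters *bi,
                                int chunks_per_block)
{
    return chunks_per_block - bi->pages_in_use + bi->soft_deletions;
}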
*/ + YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1]; + + __u32 yst_rdev; /* device stuff for block and char devices (major/min) */ + +#ifdef CONFIG_YAFFS_WINCE + __u32 win_ctime[2]; + __u32 win_atime[2]; + __u32 win_mtime[2]; +#else + __u32 roomToGrow[6]; + +#endif + __u32 inbandShadowsObject; + __u32 inbandIsShrink; + + __u32 reservedSpace[2]; + int shadowsObject; /* This object header shadows the specified object if > 0 */ + + /* isShrink applies to object headers written when we shrink the file (ie resize) */ + __u32 isShrink; + +} yaffs_ObjectHeader; + +/*--------------------------- Tnode -------------------------- */ + +union yaffs_Tnode_union { +#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG + union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1]; +#else + union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL]; +#endif +/* __u16 level0[YAFFS_NTNODES_LEVEL0]; */ + +}; + +typedef union yaffs_Tnode_union yaffs_Tnode; + +struct yaffs_TnodeList_struct { + struct yaffs_TnodeList_struct *next; + yaffs_Tnode *tnodes; +}; + +typedef struct yaffs_TnodeList_struct yaffs_TnodeList; + +/*------------------------ Object -----------------------------*/ +/* An object can be one of: + * - a directory (no data, has children links + * - a regular file (data.... not prunes :->). + * - a symlink [symbolic link] (the alias). + * - a hard link + */ + +typedef struct { + __u32 fileSize; + __u32 scannedFileSize; + __u32 shrinkSize; + int topLevel; + yaffs_Tnode *top; +} yaffs_FileStructure; + +typedef struct { + struct ylist_head children; /* list of child links */ + struct ylist_head dirty; /* Entry for list of dirty directories */ +} yaffs_DirectoryStructure; + +typedef struct { + YCHAR *alias; +} yaffs_SymLinkStructure; + +typedef struct { + struct yaffs_ObjectStruct *equivalentObject; + __u32 equivalentObjectId; +} yaffs_HardLinkStructure; + +typedef union { + yaffs_FileStructure fileVariant; + yaffs_DirectoryStructure directoryVariant; + yaffs_SymLinkStructure symLinkVariant; + yaffs_HardLinkStructure hardLinkVariant; +} yaffs_ObjectVariant; + + + +struct yaffs_ObjectStruct { + __u8 deleted:1; /* This should only apply to unlinked files. */ + __u8 softDeleted:1; /* it has also been soft deleted */ + __u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory.*/ + __u8 fake:1; /* A fake object has no presence on NAND. */ + __u8 renameAllowed:1; /* Some objects are not allowed to be renamed. */ + __u8 unlinkAllowed:1; + __u8 dirty:1; /* the object needs to be written to flash */ + __u8 valid:1; /* When the file system is being loaded up, this + * object might be created before the data + * is available (ie. file data records appear before the header). + */ + __u8 lazyLoaded:1; /* This object has been lazy loaded and is missing some detail */ + + __u8 deferedFree:1; /* For Linux kernel. Object is removed from NAND, but is + * still in the inode cache. Free of object is defered. + * until the inode is released. + */ + __u8 beingCreated:1; /* This object is still being created so skip some checks. */ + __u8 isShadowed:1; /* This object is shadowed on the way to being renamed. */ + + __u8 serial; /* serial number of chunk in NAND. 
Cached here */ + __u16 sum; /* sum of the name to speed searching */ + + struct yaffs_DeviceStruct *myDev; /* The device I'm on */ + + struct ylist_head hashLink; /* list of objects in this hash bucket */ + + struct ylist_head hardLinks; /* all the equivalent hard linked objects */ + + /* directory structure stuff */ + /* also used for linking up the free list */ + struct yaffs_ObjectStruct *parent; + struct ylist_head siblings; + + /* Where's my object header in NAND? */ + int hdrChunk; + + int nDataChunks; /* Number of data chunks attached to the file. */ + + __u32 objectId; /* the object id value */ + + __u32 yst_mode; + +#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM + YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1]; +#endif + +#ifdef CONFIG_YAFFS_WINCE + __u32 win_ctime[2]; + __u32 win_mtime[2]; + __u32 win_atime[2]; +#else + __u32 yst_uid; + __u32 yst_gid; + __u32 yst_atime; + __u32 yst_mtime; + __u32 yst_ctime; +#endif + + __u32 yst_rdev; + + void *myInode; + + yaffs_ObjectType variantType; + + yaffs_ObjectVariant variant; + +}; + +typedef struct yaffs_ObjectStruct yaffs_Object; + +struct yaffs_ObjectList_struct { + yaffs_Object *objects; + struct yaffs_ObjectList_struct *next; +}; + +typedef struct yaffs_ObjectList_struct yaffs_ObjectList; + +typedef struct { + struct ylist_head list; + int count; +} yaffs_ObjectBucket; + + +/* yaffs_CheckpointObject holds the definition of an object as dumped + * by checkpointing. + */ + +typedef struct { + int structType; + __u32 objectId; + __u32 parentId; + int hdrChunk; + yaffs_ObjectType variantType:3; + __u8 deleted:1; + __u8 softDeleted:1; + __u8 unlinked:1; + __u8 fake:1; + __u8 renameAllowed:1; + __u8 unlinkAllowed:1; + __u8 serial; + + int nDataChunks; + __u32 fileSizeOrEquivalentObjectId; +} yaffs_CheckpointObject; + +/*--------------------- Temporary buffers ---------------- + * + * These are chunk-sized working buffers. Each device has a few + */ + +typedef struct { + __u8 *buffer; + int line; /* track from whence this buffer was allocated */ + int maxLine; +} yaffs_TempBuffer; + +/*----------------- Device ---------------------------------*/ + + +struct yaffs_DeviceParamStruct { + const char *name; + + /* + * Entry parameters set up way early. Yaffs sets up the rest. + * The structure should be zeroed out before use so that unused + * and defualt values are zero. + */ + + int inbandTags; /* Use unband tags */ + __u32 totalBytesPerChunk; /* Should be >= 512, does not need to be a power of 2 */ + int nChunksPerBlock; /* does not need to be a power of 2 */ + int spareBytesPerChunk; /* spare area size */ + int startBlock; /* Start block we're allowed to use */ + int endBlock; /* End block we're allowed to use */ + int nReservedBlocks; /* We want this tuneable so that we can reduce */ + /* reserved blocks on NOR and RAM. */ + + + int nShortOpCaches; /* If <= 0, then short op caching is disabled, else + * the number of short op caches (don't use too many). + * 10 to 20 is a good bet. + */ + int useNANDECC; /* Flag to decide whether or not to use NANDECC on data (yaffs1) */ + int noTagsECC; /* Flag to decide whether or not to do ECC on packed tags (yaffs2) */ + + int isYaffs2; /* Use yaffs2 mode on this device */ + + int emptyLostAndFound; /* Auto-empty lost+found directory on mount */ + + int refreshPeriod; /* How often we should check to do a block refresh */ + + /* Checkpoint control. 
Can be set before or after initialisation */ + __u8 skipCheckpointRead; + __u8 skipCheckpointWrite; + + /* NAND access functions (Must be set before calling YAFFS)*/ + + int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev, + int chunkInNAND, const __u8 *data, + const yaffs_Spare *spare); + int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev, + int chunkInNAND, __u8 *data, + yaffs_Spare *spare); + int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev, + int blockInNAND); + int (*initialiseNAND) (struct yaffs_DeviceStruct *dev); + int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev); + +#ifdef CONFIG_YAFFS_YAFFS2 + int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev, + int chunkInNAND, const __u8 *data, + const yaffs_ExtendedTags *tags); + int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev, + int chunkInNAND, __u8 *data, + yaffs_ExtendedTags *tags); + int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo); + int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *state, __u32 *sequenceNumber); +#endif + + /* The removeObjectCallback function must be supplied by OS flavours that + * need it. + * yaffs direct uses it to implement the faster readdir. + * Linux uses it to protect the directory during unlocking. + */ + void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj); + + /* Callback to mark the superblock dirty */ + void (*markSuperBlockDirty)(struct yaffs_DeviceStruct *dev); + + /* Callback to control garbage collection. */ + unsigned (*gcControl)(struct yaffs_DeviceStruct *dev); + + /* Debug control flags. Don't use unless you know what you're doing */ + int useHeaderFileSize; /* Flag to determine if we should use file sizes from the header */ + int disableLazyLoad; /* Disable lazy loading on this device */ + int wideTnodesDisabled; /* Set to disable wide tnodes */ + int disableSoftDelete; /* yaffs 1 only: Set to disable the use of softdeletion. */ + + int deferDirectoryUpdate; /* Set to defer directory updates */ + +}; + +typedef struct yaffs_DeviceParamStruct yaffs_DeviceParam; + +struct yaffs_DeviceStruct { + struct yaffs_DeviceParamStruct param; + + /* Context storage. Holds extra OS specific data for this device */ + + void *context; + + /* Runtime parameters. Set up by YAFFS. */ + int nDataBytesPerChunk; + + /* Non-wide tnode stuff */ + __u16 chunkGroupBits; /* Number of bits that need to be resolved if + * the tnodes are not wide enough. 
+ */ + __u16 chunkGroupSize; /* == 2^^chunkGroupBits */ + + /* Stuff to support wide tnodes */ + __u32 tnodeWidth; + __u32 tnodeMask; + + /* Stuff for figuring out file offset to chunk conversions */ + __u32 chunkShift; /* Shift value */ + __u32 chunkDiv; /* Divisor after shifting: 1 for power-of-2 sizes */ + __u32 chunkMask; /* Mask to use for power-of-2 case */ + + + + int isMounted; + int readOnly; + int isCheckpointed; + + + /* Stuff to support block offsetting to support start block zero */ + int internalStartBlock; + int internalEndBlock; + int blockOffset; + int chunkOffset; + + + /* Runtime checkpointing stuff */ + int checkpointPageSequence; /* running sequence number of checkpoint pages */ + int checkpointByteCount; + int checkpointByteOffset; + __u8 *checkpointBuffer; + int checkpointOpenForWrite; + int blocksInCheckpoint; + int checkpointCurrentChunk; + int checkpointCurrentBlock; + int checkpointNextBlock; + int *checkpointBlockList; + int checkpointMaxBlocks; + __u32 checkpointSum; + __u32 checkpointXor; + + int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */ + + /* Block Info */ + yaffs_BlockInfo *blockInfo; + __u8 *chunkBits; /* bitmap of chunks in use */ + unsigned blockInfoAlt:1; /* was allocated using alternative strategy */ + unsigned chunkBitsAlt:1; /* was allocated using alternative strategy */ + int chunkBitmapStride; /* Number of bytes of chunkBits per block. + * Must be consistent with nChunksPerBlock. + */ + + int nErasedBlocks; + int allocationBlock; /* Current block being allocated off */ + __u32 allocationPage; + int allocationBlockFinder; /* Used to search for next allocation block */ + + int nTnodesCreated; + yaffs_Tnode *freeTnodes; + int nFreeTnodes; + yaffs_TnodeList *allocatedTnodeList; + + int nObjectsCreated; + yaffs_Object *freeObjects; + int nFreeObjects; + + int nHardLinks; + + yaffs_ObjectList *allocatedObjectList; + + yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS]; + __u32 bucketFinder; + + int nFreeChunks; + + /* Garbage collection control */ + __u32 *gcCleanupList; /* objects to delete at the end of a GC. */ + + unsigned hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */ + unsigned gcDisable; + unsigned gcBlockFinder; + unsigned gcDirtiest; + unsigned gcPagesInUse; + unsigned gcNotDone; + unsigned gcBlock; + unsigned gcChunk; + unsigned gcSkip; + + /* Special directories */ + yaffs_Object *rootDir; + yaffs_Object *lostNFoundDir; + + /* Buffer areas for storing data to recover from write failures TODO + * __u8 bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK]; + * yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK]; + */ + + int bufferedBlock; /* Which block is buffered here? */ + int doingBufferedBlockRewrite; + + yaffs_ChunkCache *srCache; + int srLastUse; + + /* Stuff for background deletion and unlinked files.*/ + yaffs_Object *unlinkedDir; /* Directory where unlinked and deleted files live. */ + yaffs_Object *deletedDir; /* Directory where deleted objects are sent to disappear. */ + yaffs_Object *unlinkedDeletion; /* Current file being background deleted.*/ + int nDeletedFiles; /* Count of files awaiting deletion;*/ + int nUnlinkedFiles; /* Count of unlinked files. */ + int nBackgroundDeletions; /* Count of background deletions. 
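chunkShift, chunkDiv and chunkMask above exist to turn a file offset into a chunk number plus an offset within the chunk without a division in the common case. An illustrative sketch of how such a trio can be used, treating the chunk size as chunkDiv << chunkShift, with chunkDiv == 1 and chunkMask == (1 << chunkShift) - 1 for power-of-2 chunk sizes; the exact code in yaffs_guts.c may differ:

static void addr_to_chunk(unsigned long long offset,
                          unsigned chunk_shift, unsigned chunk_div,
                          unsigned chunk_mask,
                          unsigned *chunk, unsigned *offset_in_chunk)
{
    if (chunk_div == 1) {
        /* power-of-2 chunk size: shift and mask only */
        *chunk = (unsigned)(offset >> chunk_shift);
        *offset_in_chunk = (unsigned)(offset & chunk_mask);
    } else {
        /* odd factor left over: one divide after the shift */
        *chunk = (unsigned)((offset >> chunk_shift) / chunk_div);
        *offset_in_chunk = (unsigned)(offset -
            ((unsigned long long)*chunk << chunk_shift) * chunk_div);
    }
}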
*/ + + /* Temporary buffer management */ + yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS]; + int maxTemp; + int tempInUse; + int unmanagedTempAllocations; + int unmanagedTempDeallocations; + + /* yaffs2 runtime stuff */ + unsigned sequenceNumber; /* Sequence number of currently allocating block */ + unsigned oldestDirtySequence; + unsigned oldestDirtyBlock; + + /* Block refreshing */ + int refreshSkip; /* A skip down counter. Refresh happens when this gets to zero. */ + + /* Dirty directory handling */ + struct ylist_head dirtyDirectories; /* List of dirty directories */ + + + /* Statistcs */ + __u32 nPageWrites; + __u32 nPageReads; + __u32 nBlockErasures; + __u32 nErasureFailures; + __u32 nGCCopies; + __u32 allGCs; + __u32 passiveGCs; + __u32 oldestDirtyGCs; + __u32 backgroundGCs; + __u32 nRetriedWrites; + __u32 nRetiredBlocks; + __u32 eccFixed; + __u32 eccUnfixed; + __u32 tagsEccFixed; + __u32 tagsEccUnfixed; + __u32 nDeletions; + __u32 nUnmarkedDeletions; + __u32 refreshCount; + __u32 cacheHits; + +}; + +typedef struct yaffs_DeviceStruct yaffs_Device; + +/* The static layout of block usage etc is stored in the super block header */ +typedef struct { + int StructType; + int version; + int checkpointStartBlock; + int checkpointEndBlock; + int startBlock; + int endBlock; + int rfu[100]; +} yaffs_SuperBlockHeader; + +/* The CheckpointDevice structure holds the device information that changes at runtime and + * must be preserved over unmount/mount cycles. + */ +typedef struct { + int structType; + int nErasedBlocks; + int allocationBlock; /* Current block being allocated off */ + __u32 allocationPage; + int nFreeChunks; + + int nDeletedFiles; /* Count of files awaiting deletion;*/ + int nUnlinkedFiles; /* Count of unlinked files. */ + int nBackgroundDeletions; /* Count of background deletions. 
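Putting the parameter and device structures together: the comments above say the parameter block should be zeroed so unused fields take their defaults, and that the NAND access functions must be set before calling YAFFS. A hypothetical piece of driver glue illustrating that convention for a yaffs2 device (the my_* callbacks, the geometry numbers and the 1024-block size are invented placeholders; assumes yaffs_guts.h and <string.h> are included and CONFIG_YAFFS_YAFFS2 is enabled):

/* Placeholder NAND callbacks, assumed to be implemented elsewhere. */
extern int my_write_chunk(struct yaffs_DeviceStruct *dev, int chunkInNAND,
                          const __u8 *data, const yaffs_ExtendedTags *tags);
extern int my_read_chunk(struct yaffs_DeviceStruct *dev, int chunkInNAND,
                         __u8 *data, yaffs_ExtendedTags *tags);
extern int my_erase_block(struct yaffs_DeviceStruct *dev, int blockInNAND);

static struct yaffs_DeviceStruct my_dev;

static int my_setup(void)
{
    struct yaffs_DeviceParamStruct *p = &my_dev.param;

    memset(&my_dev, 0, sizeof(my_dev));   /* unused/default fields stay zero */

    p->name               = "nand0";
    p->totalBytesPerChunk = 2048;         /* >= 512, need not be a power of 2 */
    p->spareBytesPerChunk = 64;
    p->nChunksPerBlock    = 64;
    p->startBlock         = 0;
    p->endBlock           = 1023;
    p->nReservedBlocks    = 5;
    p->nShortOpCaches     = 10;           /* "10 to 20 is a good bet" */
    p->isYaffs2           = 1;

    p->writeChunkWithTagsToNAND  = my_write_chunk;
    p->readChunkWithTagsFromNAND = my_read_chunk;
    p->eraseBlockInNAND          = my_erase_block;

    return yaffs_GutsInitialise(&my_dev);
}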
*/ + + /* yaffs2 runtime stuff */ + unsigned sequenceNumber; /* Sequence number of currently allocating block */ + +} yaffs_CheckpointDevice; + + +typedef struct { + int structType; + __u32 magic; + __u32 version; + __u32 head; +} yaffs_CheckpointValidity; + + +/*----------------------- YAFFS Functions -----------------------*/ + +int yaffs_GutsInitialise(yaffs_Device *dev); +void yaffs_Deinitialise(yaffs_Device *dev); + +int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev); + +int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName, + yaffs_Object *newDir, const YCHAR *newName); + +int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name); +int yaffs_DeleteObject(yaffs_Object *obj); + +int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize); +int yaffs_GetObjectFileLength(yaffs_Object *obj); +int yaffs_GetObjectInode(yaffs_Object *obj); +unsigned yaffs_GetObjectType(yaffs_Object *obj); +int yaffs_GetObjectLinkCount(yaffs_Object *obj); + +int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr); +int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr); + +/* File operations */ +int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset, + int nBytes); +int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset, + int nBytes, int writeThrough); +int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize); + +yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid); + +int yaffs_FlushFile(yaffs_Object *obj, int updateTime, int dataSync); + +/* Flushing and checkpointing */ +void yaffs_FlushEntireDeviceCache(yaffs_Device *dev); + +int yaffs_CheckpointSave(yaffs_Device *dev); +int yaffs_CheckpointRestore(yaffs_Device *dev); + +/* Directory operations */ +yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid); +yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name); +int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir, + int (*fn) (yaffs_Object *)); + +yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number); + +/* Link operations */ +yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name, + yaffs_Object *equivalentObject); + +yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj); + +/* Symlink operations */ +yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid, + const YCHAR *alias); +YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj); + +/* Special inodes (fifos, sockets and devices) */ +yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name, + __u32 mode, __u32 uid, __u32 gid, __u32 rdev); + +/* Special directories */ +yaffs_Object *yaffs_Root(yaffs_Device *dev); +yaffs_Object *yaffs_LostNFound(yaffs_Device *dev); + +#ifdef CONFIG_YAFFS_WINCE +/* CONFIG_YAFFS_WINCE special stuff */ +void yfsd_WinFileTimeNow(__u32 target[2]); +#endif + +void yaffs_HandleDeferedFree(yaffs_Object *obj); + +void yaffs_UpdateDirtyDirectories(yaffs_Device *dev); + +int yaffs_BackgroundGarbageCollect(yaffs_Device *dev, unsigned urgency); + +/* Debug dump */ +int yaffs_DumpObject(yaffs_Object *obj); + +void yaffs_GutsTest(yaffs_Device *dev); + +/* A few useful functions */ +void yaffs_InitialiseTags(yaffs_ExtendedTags *tags); +void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn); +int yaffs_CheckFF(__u8 *buffer, int nBytes); +void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi); + 
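The checkpoint functions declared above pair up with the mount-time logic in yaffs_GutsInitialise(): a checkpoint written at unmount lets the next mount restore state instead of scanning the whole device. A plausible shutdown sequence for OS glue code, based only on these declarations (the real Linux glue may order things differently):

static void example_unmount(yaffs_Device *dev)
{
    yaffs_FlushEntireDeviceCache(dev);   /* push cached chunks out to NAND      */
    yaffs_CheckpointSave(dev);           /* so the next mount can skip the scan */
    yaffs_Deinitialise(dev);             /* free tnodes, objects, caches, etc.  */
}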
+__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo); +void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo); + + +#endif diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h new file mode 100644 index 000000000000..4434e6c497b9 --- /dev/null +++ b/fs/yaffs2/yaffs_linux.h @@ -0,0 +1,42 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_LINUX_H__ +#define __YAFFS_LINUX_H__ + +#include "devextras.h" +#include "yportenv.h" + +struct yaffs_LinuxContext { + struct ylist_head contextList; /* List of these we have mounted */ + struct yaffs_DeviceStruct *dev; + struct super_block * superBlock; + struct task_struct *bgThread; /* Background thread for this device */ + int bgRunning; + struct semaphore grossLock; /* Gross locking semaphore */ + __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer + * at compile time so we have to allocate it. + */ + struct mtd_info *mtd; + struct ylist_head searchContexts; + void (*putSuperFunc)(struct super_block *sb); + + struct task_struct *readdirProcess; +}; + +#define yaffs_DeviceToContext(dev) ((struct yaffs_LinuxContext *)((dev)->context)) + +#endif + diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c new file mode 100644 index 000000000000..5de3193ea2b9 --- /dev/null +++ b/fs/yaffs2/yaffs_mtdif.c @@ -0,0 +1,56 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "yportenv.h" + + +#include "yaffs_mtdif.h" + +#include "linux/mtd/mtd.h" +#include "linux/types.h" +#include "linux/time.h" +#include "linux/mtd/nand.h" + +#include "yaffs_linux.h" + +int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + __u32 addr = + ((loff_t) blockNumber) * dev->param.totalBytesPerChunk + * dev->param.nChunksPerBlock; + struct erase_info ei; + + int retval = 0; + + ei.mtd = mtd; + ei.addr = addr; + ei.len = dev->param.totalBytesPerChunk * dev->param.nChunksPerBlock; + ei.time = 1000; + ei.retries = 2; + ei.callback = NULL; + ei.priv = (u_long) dev; + + retval = mtd->erase(mtd, &ei); + + if (retval == 0) + return YAFFS_OK; + else + return YAFFS_FAIL; +} + +int nandmtd_InitialiseNAND(yaffs_Device *dev) +{ + return YAFFS_OK; +} + diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h new file mode 100644 index 000000000000..fad9d287061c --- /dev/null +++ b/fs/yaffs2/yaffs_mtdif.h @@ -0,0 +1,27 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. 
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_MTDIF_H__ +#define __YAFFS_MTDIF_H__ + +#include "yaffs_guts.h" + +#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18)) +extern struct nand_oobinfo yaffs_oobinfo; +extern struct nand_oobinfo yaffs_noeccinfo; +#endif +int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber); +int nandmtd_InitialiseNAND(yaffs_Device *dev); +#endif diff --git a/fs/yaffs2/yaffs_mtdif1.c b/fs/yaffs2/yaffs_mtdif1.c new file mode 100644 index 000000000000..aaa02db2f665 --- /dev/null +++ b/fs/yaffs2/yaffs_mtdif1.c @@ -0,0 +1,361 @@ +/* + * YAFFS: Yet another FFS. A NAND-flash specific file system. + * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * This module provides the interface between yaffs_nand.c and the + * MTD API. This version is used when the MTD interface supports the + * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17, + * and we have small-page NAND device. + * + * These functions are invoked via function pointers in yaffs_nand.c. + * This replaces functionality provided by functions in yaffs_mtdif.c + * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are + * called in yaffs_mtdif.c when the function pointers are NULL. + * We assume the MTD layer is performing ECC (useNANDECC is true). + */ + +#include "yportenv.h" +#include "yaffs_trace.h" +#include "yaffs_guts.h" +#include "yaffs_packedtags1.h" +#include "yaffs_tagscompat.h" /* for yaffs_CalcTagsECC */ +#include "yaffs_linux.h" + +#include "linux/kernel.h" +#include "linux/version.h" +#include "linux/types.h" +#include "linux/mtd/mtd.h" + +/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */ +#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17)) + +#ifndef CONFIG_YAFFS_9BYTE_TAGS +# define YTAG1_SIZE 8 +#else +# define YTAG1_SIZE 9 +#endif + +#if 0 +/* Use the following nand_ecclayout with MTD when using + * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout. + * If you have existing Yaffs images and the byte order differs from this, + * adjust 'oobfree' to match your existing Yaffs data. + * + * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the + * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to + * the 9th byte. + * + * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5 + * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P + * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus + * byte and B is the small-page bad-block indicator byte. + */ +static struct nand_ecclayout nand_oob_16 = { + .eccbytes = 6, + .eccpos = { 8, 9, 10, 13, 14, 15 }, + .oobavail = 9, + .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } } +}; +#endif + +/* Write a chunk (page) of data to NAND. 
+ * + * Caller always provides ExtendedTags data which are converted to a more + * compact (packed) form for storage in NAND. A mini-ECC runs over the + * contents of the tags meta-data; used to valid the tags when read. + * + * - Pack ExtendedTags to PackedTags1 form + * - Compute mini-ECC for PackedTags1 + * - Write data and packed tags to NAND. + * + * Note: Due to the use of the PackedTags1 meta-data which does not include + * a full sequence number (as found in the larger PackedTags2 form) it is + * necessary for Yaffs to re-write a chunk/page (just once) to mark it as + * discarded and dirty. This is not ideal: newer NAND parts are supposed + * to be written just once. When Yaffs performs this operation, this + * function is called with a NULL data pointer -- calling MTD write_oob + * without data is valid usage (2.6.17). + * + * Any underlying MTD error results in YAFFS_FAIL. + * Returns YAFFS_OK or YAFFS_FAIL. + */ +int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, + int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int chunkBytes = dev->nDataBytesPerChunk; + loff_t addr = ((loff_t)chunkInNAND) * chunkBytes; + struct mtd_oob_ops ops; + yaffs_PackedTags1 pt1; + int retval; + + /* we assume that PackedTags1 and yaffs_Tags are compatible */ + compile_time_assertion(sizeof(yaffs_PackedTags1) == 12); + compile_time_assertion(sizeof(yaffs_Tags) == 8); + + yaffs_PackTags1(&pt1, etags); + yaffs_CalcTagsECC((yaffs_Tags *)&pt1); + + /* When deleting a chunk, the upper layer provides only skeletal + * etags, one with chunkDeleted set. However, we need to update the + * tags, not erase them completely. So we use the NAND write property + * that only zeroed-bits stick and set tag bytes to all-ones and + * zero just the (not) deleted bit. + */ +#ifndef CONFIG_YAFFS_9BYTE_TAGS + if (etags->chunkDeleted) { + memset(&pt1, 0xff, 8); + /* clear delete status bit to indicate deleted */ + pt1.deleted = 0; + } +#else + ((__u8 *)&pt1)[8] = 0xff; + if (etags->chunkDeleted) { + memset(&pt1, 0xff, 8); + /* zero pageStatus byte to indicate deleted */ + ((__u8 *)&pt1)[8] = 0; + } +#endif + + memset(&ops, 0, sizeof(ops)); + ops.mode = MTD_OOB_AUTO; + ops.len = (data) ? chunkBytes : 0; + ops.ooblen = YTAG1_SIZE; + ops.datbuf = (__u8 *)data; + ops.oobbuf = (__u8 *)&pt1; + + retval = mtd->write_oob(mtd, addr, &ops); + if (retval) { + T(YAFFS_TRACE_MTD, + (TSTR("write_oob failed, chunk %d, mtd error %d"TENDSTR), + chunkInNAND, retval)); + } + return retval ? YAFFS_FAIL : YAFFS_OK; +} + +/* Return with empty ExtendedTags but add eccResult. + */ +static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval) +{ + if (etags) { + memset(etags, 0, sizeof(*etags)); + etags->eccResult = eccResult; + } + return retval; +} + +/* Read a chunk (page) from NAND. + * + * Caller expects ExtendedTags data to be usable even on error; that is, + * all members except eccResult and blockBad are zeroed. + * + * - Check ECC results for data (if applicable) + * - Check for blank/erased block (return empty ExtendedTags if blank) + * - Check the PackedTags1 mini-ECC (correct if necessary/possible) + * - Convert PackedTags1 to ExtendedTags + * - Update eccResult and blockBad members to refect state. + * + * Returns YAFFS_OK or YAFFS_FAIL. 
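The deletion marking described above leans on a basic NAND property: programming can only clear bits (1 to 0), so re-writing a page effectively ANDs the new data into what is already stored. Writing all-ones except for the deleted flag therefore flips just that flag and leaves the rest of the tag bytes alone. A toy model of that behaviour (not MTD code):

/* Result of programming 'written' over an already-programmed 'stored' byte. */
static unsigned char nand_program(unsigned char stored, unsigned char written)
{
    return stored & written;   /* only 0 bits "stick" */
}

/* e.g. nand_program(0xB5, 0xFE) == 0xB4: only bit 0 is cleared. */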
+ */ +int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, + int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int chunkBytes = dev->nDataBytesPerChunk; + loff_t addr = ((loff_t)chunkInNAND) * chunkBytes; + int eccres = YAFFS_ECC_RESULT_NO_ERROR; + struct mtd_oob_ops ops; + yaffs_PackedTags1 pt1; + int retval; + int deleted; + + memset(&ops, 0, sizeof(ops)); + ops.mode = MTD_OOB_AUTO; + ops.len = (data) ? chunkBytes : 0; + ops.ooblen = YTAG1_SIZE; + ops.datbuf = data; + ops.oobbuf = (__u8 *)&pt1; + +#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20)) + /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug; + * help it out with ops.len = ops.ooblen when ops.datbuf == NULL. + */ + ops.len = (ops.datbuf) ? ops.len : ops.ooblen; +#endif + /* Read page and oob using MTD. + * Check status and determine ECC result. + */ + retval = mtd->read_oob(mtd, addr, &ops); + if (retval) { + T(YAFFS_TRACE_MTD, + (TSTR("read_oob failed, chunk %d, mtd error %d"TENDSTR), + chunkInNAND, retval)); + } + + switch (retval) { + case 0: + /* no error */ + break; + + case -EUCLEAN: + /* MTD's ECC fixed the data */ + eccres = YAFFS_ECC_RESULT_FIXED; + dev->eccFixed++; + break; + + case -EBADMSG: + /* MTD's ECC could not fix the data */ + dev->eccUnfixed++; + /* fall into... */ + default: + rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0); + etags->blockBad = (mtd->block_isbad)(mtd, addr); + return YAFFS_FAIL; + } + + /* Check for a blank/erased chunk. + */ + if (yaffs_CheckFF((__u8 *)&pt1, 8)) { + /* when blank, upper layers want eccResult to be <= NO_ERROR */ + return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK); + } + +#ifndef CONFIG_YAFFS_9BYTE_TAGS + /* Read deleted status (bit) then return it to it's non-deleted + * state before performing tags mini-ECC check. pt1.deleted is + * inverted. + */ + deleted = !pt1.deleted; + pt1.deleted = 1; +#else + deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7); +#endif + + /* Check the packed tags mini-ECC and correct if necessary/possible. + */ + retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1); + switch (retval) { + case 0: + /* no tags error, use MTD result */ + break; + case 1: + /* recovered tags-ECC error */ + dev->tagsEccFixed++; + if (eccres == YAFFS_ECC_RESULT_NO_ERROR) + eccres = YAFFS_ECC_RESULT_FIXED; + break; + default: + /* unrecovered tags-ECC error */ + dev->tagsEccUnfixed++; + return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL); + } + + /* Unpack the tags to extended form and set ECC result. + * [set shouldBeFF just to keep yaffs_UnpackTags1 happy] + */ + pt1.shouldBeFF = 0xFFFFFFFF; + yaffs_UnpackTags1(etags, &pt1); + etags->eccResult = eccres; + + /* Set deleted state */ + etags->chunkDeleted = deleted; + return YAFFS_OK; +} + +/* Mark a block bad. + * + * This is a persistant state. + * Use of this function should be rare. + * + * Returns YAFFS_OK or YAFFS_FAIL. + */ +int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int blocksize = dev->param.nChunksPerBlock * dev->nDataBytesPerChunk; + int retval; + + T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("marking block %d bad"TENDSTR), blockNo)); + + retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo); + return (retval) ? YAFFS_FAIL : YAFFS_OK; +} + +/* Check any MTD prerequists. + * + * Returns YAFFS_OK or YAFFS_FAIL. 
+ */ +static int nandmtd1_TestPrerequists(struct mtd_info *mtd) +{ + /* 2.6.18 has mtd->ecclayout->oobavail */ + /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */ + int oobavail = mtd->ecclayout->oobavail; + + if (oobavail < YTAG1_SIZE) { + T(YAFFS_TRACE_ERROR, + (TSTR("mtd device has only %d bytes for tags, need %d"TENDSTR), + oobavail, YTAG1_SIZE)); + return YAFFS_FAIL; + } + return YAFFS_OK; +} + +/* Query for the current state of a specific block. + * + * Examine the tags of the first chunk of the block and return the state: + * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad + * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use + * - YAFFS_BLOCK_STATE_EMPTY, the block is clean + * + * Always returns YAFFS_OK. + */ +int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *pState, __u32 *pSequenceNumber) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int chunkNo = blockNo * dev->param.nChunksPerBlock; + loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk; + yaffs_ExtendedTags etags; + int state = YAFFS_BLOCK_STATE_DEAD; + int seqnum = 0; + int retval; + + /* We don't yet have a good place to test for MTD config prerequists. + * Do it here as we are called during the initial scan. + */ + if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) + return YAFFS_FAIL; + + retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags); + etags.blockBad = (mtd->block_isbad)(mtd, addr); + if (etags.blockBad) { + T(YAFFS_TRACE_BAD_BLOCKS, + (TSTR("block %d is marked bad"TENDSTR), blockNo)); + state = YAFFS_BLOCK_STATE_DEAD; + } else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) { + /* bad tags, need to look more closely */ + state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; + } else if (etags.chunkUsed) { + state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; + seqnum = etags.sequenceNumber; + } else { + state = YAFFS_BLOCK_STATE_EMPTY; + } + + *pState = state; + *pSequenceNumber = seqnum; + + /* query always succeeds */ + return YAFFS_OK; +} + +#endif /*MTD_VERSION*/ diff --git a/fs/yaffs2/yaffs_mtdif1.h b/fs/yaffs2/yaffs_mtdif1.h new file mode 100644 index 000000000000..0e6a56fbd540 --- /dev/null +++ b/fs/yaffs2/yaffs_mtdif1.h @@ -0,0 +1,28 @@ +/* + * YAFFS: Yet another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_MTDIF1_H__ +#define __YAFFS_MTDIF1_H__ + +int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, const yaffs_ExtendedTags *tags); + +int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, + __u8 *data, yaffs_ExtendedTags *tags); + +int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo); + +int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *state, __u32 *sequenceNumber); + +#endif diff --git a/fs/yaffs2/yaffs_mtdif2.c b/fs/yaffs2/yaffs_mtdif2.c new file mode 100644 index 000000000000..c1d447819970 --- /dev/null +++ b/fs/yaffs2/yaffs_mtdif2.c @@ -0,0 +1,257 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. 
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* mtd interface for YAFFS2 */ + +#include "yportenv.h" +#include "yaffs_trace.h" + +#include "yaffs_mtdif2.h" + +#include "linux/mtd/mtd.h" +#include "linux/types.h" +#include "linux/time.h" + +#include "yaffs_packedtags2.h" + +#include "yaffs_linux.h" + +/* NB For use with inband tags.... + * We assume that the data buffer is of size totalBytersPerChunk so that we can also + * use it to load the tags. + */ +int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *tags) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; +#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17)) + struct mtd_oob_ops ops; +#else + size_t dummy; +#endif + int retval = 0; + + loff_t addr; + + yaffs_PackedTags2 pt; + + int packed_tags_size = dev->param.noTagsECC ? sizeof(pt.t) : sizeof(pt); + void * packed_tags_ptr = dev->param.noTagsECC ? (void *) &pt.t : (void *)&pt; + + T(YAFFS_TRACE_MTD, + (TSTR + ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p" + TENDSTR), chunkInNAND, data, tags)); + + + addr = ((loff_t) chunkInNAND) * dev->param.totalBytesPerChunk; + + /* For yaffs2 writing there must be both data and tags. + * If we're using inband tags, then the tags are stuffed into + * the end of the data buffer. + */ + if (!data || !tags) + BUG(); + else if (dev->param.inbandTags) { + yaffs_PackedTags2TagsPart *pt2tp; + pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk); + yaffs_PackTags2TagsPart(pt2tp, tags); + } else + yaffs_PackTags2(&pt, tags, !dev->param.noTagsECC); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + ops.mode = MTD_OOB_AUTO; + ops.ooblen = (dev->param.inbandTags) ? 0 : packed_tags_size; + ops.len = dev->param.totalBytesPerChunk; + ops.ooboffs = 0; + ops.datbuf = (__u8 *)data; + ops.oobbuf = (dev->param.inbandTags) ? NULL : packed_tags_ptr; + retval = mtd->write_oob(mtd, addr, &ops); + +#else + if (!dev->param.inbandTags) { + retval = + mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk, + &dummy, data, (__u8 *) packed_tags_ptr, NULL); + } else { + retval = + mtd->write(mtd, addr, dev->param.totalBytesPerChunk, &dummy, + data); + } +#endif + + if (retval == 0) + return YAFFS_OK; + else + return YAFFS_FAIL; +} + +int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, + __u8 *data, yaffs_ExtendedTags *tags) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; +#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17)) + struct mtd_oob_ops ops; +#endif + size_t dummy; + int retval = 0; + int localData = 0; + + loff_t addr = ((loff_t) chunkInNAND) * dev->param.totalBytesPerChunk; + + yaffs_PackedTags2 pt; + + int packed_tags_size = dev->param.noTagsECC ? sizeof(pt.t) : sizeof(pt); + void * packed_tags_ptr = dev->param.noTagsECC ? 
(void *) &pt.t: (void *)&pt; + + T(YAFFS_TRACE_MTD, + (TSTR + ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p" + TENDSTR), chunkInNAND, data, tags)); + + if (dev->param.inbandTags) { + + if (!data) { + localData = 1; + data = yaffs_GetTempBuffer(dev, __LINE__); + } + + + } + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + if (dev->param.inbandTags || (data && !tags)) + retval = mtd->read(mtd, addr, dev->param.totalBytesPerChunk, + &dummy, data); + else if (tags) { + ops.mode = MTD_OOB_AUTO; + ops.ooblen = packed_tags_size; + ops.len = data ? dev->nDataBytesPerChunk : packed_tags_size; + ops.ooboffs = 0; + ops.datbuf = data; + ops.oobbuf = yaffs_DeviceToContext(dev)->spareBuffer; + retval = mtd->read_oob(mtd, addr, &ops); + } +#else + if (!dev->param.inbandTags && data && tags) { + + retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk, + &dummy, data, dev->spareBuffer, + NULL); + } else { + if (data) + retval = + mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy, + data); + if (!dev->param.inbandTags && tags) + retval = + mtd->read_oob(mtd, addr, mtd->oobsize, &dummy, + dev->spareBuffer); + } +#endif + + + if (dev->param.inbandTags) { + if (tags) { + yaffs_PackedTags2TagsPart *pt2tp; + pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk]; + yaffs_UnpackTags2TagsPart(tags, pt2tp); + } + } else { + if (tags) { + memcpy(packed_tags_ptr, yaffs_DeviceToContext(dev)->spareBuffer, packed_tags_size); + yaffs_UnpackTags2(tags, &pt, !dev->param.noTagsECC); + } + } + + if (localData) + yaffs_ReleaseTempBuffer(dev, data, __LINE__); + + if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR) { + tags->eccResult = YAFFS_ECC_RESULT_UNFIXED; + dev->eccUnfixed++; + } + if(tags && retval == -EUCLEAN && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR) { + tags->eccResult = YAFFS_ECC_RESULT_FIXED; + dev->eccFixed++; + } + if (retval == 0) + return YAFFS_OK; + else + return YAFFS_FAIL; +} + +int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int retval; + T(YAFFS_TRACE_MTD, + (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo)); + + retval = + mtd->block_markbad(mtd, + blockNo * dev->param.nChunksPerBlock * + dev->param.totalBytesPerChunk); + + if (retval == 0) + return YAFFS_OK; + else + return YAFFS_FAIL; + +} + +int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *state, __u32 *sequenceNumber) +{ + struct mtd_info *mtd = yaffs_DeviceToContext(dev)->mtd; + int retval; + + T(YAFFS_TRACE_MTD, + (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo)); + retval = + mtd->block_isbad(mtd, + blockNo * dev->param.nChunksPerBlock * + dev->param.totalBytesPerChunk); + + if (retval) { + T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR))); + + *state = YAFFS_BLOCK_STATE_DEAD; + *sequenceNumber = 0; + } else { + yaffs_ExtendedTags t; + nandmtd2_ReadChunkWithTagsFromNAND(dev, + blockNo * + dev->param.nChunksPerBlock, NULL, + &t); + + if (t.chunkUsed) { + *sequenceNumber = t.sequenceNumber; + *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; + } else { + *sequenceNumber = 0; + *state = YAFFS_BLOCK_STATE_EMPTY; + } + } + T(YAFFS_TRACE_MTD, + (TSTR("block is bad seq %d state %d" TENDSTR), *sequenceNumber, + *state)); + + if (retval == 0) + return YAFFS_OK; + else + return YAFFS_FAIL; +} + diff --git a/fs/yaffs2/yaffs_mtdif2.h b/fs/yaffs2/yaffs_mtdif2.h new file mode 100644 index 000000000000..5691267eafa8 --- /dev/null +++ 
b/fs/yaffs2/yaffs_mtdif2.h @@ -0,0 +1,29 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_MTDIF2_H__ +#define __YAFFS_MTDIF2_H__ + +#include "yaffs_guts.h" +int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *tags); +int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, + __u8 *data, yaffs_ExtendedTags *tags); +int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo); +int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *state, __u32 *sequenceNumber); + +#endif diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c new file mode 100644 index 000000000000..6964ad18c0d5 --- /dev/null +++ b/fs/yaffs2/yaffs_nand.c @@ -0,0 +1,140 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "yaffs_nand.h" +#include "yaffs_tagscompat.h" +#include "yaffs_tagsvalidity.h" + +#include "yaffs_getblockinfo.h" + +int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, + __u8 *buffer, + yaffs_ExtendedTags *tags) +{ + int result; + yaffs_ExtendedTags localTags; + + int realignedChunkInNAND = chunkInNAND - dev->chunkOffset; + + dev->nPageReads++; + + /* If there are no tags provided, use local tags to get prioritised gc working */ + if (!tags) + tags = &localTags; + + if (dev->param.readChunkWithTagsFromNAND) + result = dev->param.readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer, + tags); + else + result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev, + realignedChunkInNAND, + buffer, + tags); + if (tags && + tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) { + + yaffs_BlockInfo *bi; + bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->param.nChunksPerBlock); + yaffs_HandleChunkError(dev, bi); + } + + return result; +} + +int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev, + int chunkInNAND, + const __u8 *buffer, + yaffs_ExtendedTags *tags) +{ + + dev->nPageWrites++; + + chunkInNAND -= dev->chunkOffset; + + + if (tags) { + tags->sequenceNumber = dev->sequenceNumber; + tags->chunkUsed = 1; + if (!yaffs_ValidateTags(tags)) { + T(YAFFS_TRACE_ERROR, + (TSTR("Writing uninitialised tags" TENDSTR))); + YBUG(); + } + T(YAFFS_TRACE_WRITE, + (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND, + tags->objectId, tags->chunkId)); + } else { + T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR))); + YBUG(); + } + + if (dev->param.writeChunkWithTagsToNAND) + return dev->param.writeChunkWithTagsToNAND(dev, chunkInNAND, buffer, + tags); + else + return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev, + chunkInNAND, + buffer, + tags); +} + +int 
yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo) +{ + blockNo -= dev->blockOffset; + + + if (dev->param.markNANDBlockBad) + return dev->param.markNANDBlockBad(dev, blockNo); + else + return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo); +} + +int yaffs_QueryInitialBlockState(yaffs_Device *dev, + int blockNo, + yaffs_BlockState *state, + __u32 *sequenceNumber) +{ + blockNo -= dev->blockOffset; + + if (dev->param.queryNANDBlock) + return dev->param.queryNANDBlock(dev, blockNo, state, sequenceNumber); + else + return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo, + state, + sequenceNumber); +} + + +int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev, + int blockInNAND) +{ + int result; + + blockInNAND -= dev->blockOffset; + + dev->nBlockErasures++; + + result = dev->param.eraseBlockInNAND(dev, blockInNAND); + + return result; +} + +int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev) +{ + if(dev->param.initialiseNAND) + return dev->param.initialiseNAND(dev); + return YAFFS_OK; +} + + + diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h new file mode 100644 index 000000000000..62a466596c59 --- /dev/null +++ b/fs/yaffs2/yaffs_nand.h @@ -0,0 +1,44 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_NAND_H__ +#define __YAFFS_NAND_H__ +#include "yaffs_guts.h" + + + +int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, + __u8 *buffer, + yaffs_ExtendedTags *tags); + +int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev, + int chunkInNAND, + const __u8 *buffer, + yaffs_ExtendedTags *tags); + +int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo); + +int yaffs_QueryInitialBlockState(yaffs_Device *dev, + int blockNo, + yaffs_BlockState *state, + unsigned *sequenceNumber); + +int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev, + int blockInNAND); + +int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev); + +#endif + diff --git a/fs/yaffs2/yaffs_nandemul2k.h b/fs/yaffs2/yaffs_nandemul2k.h new file mode 100644 index 000000000000..2f4ce97b22bf --- /dev/null +++ b/fs/yaffs2/yaffs_nandemul2k.h @@ -0,0 +1,39 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. 
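Each wrapper in yaffs_nand.c subtracts dev->chunkOffset or dev->blockOffset before handing the request to the driver, which is the other half of the "block offsetting to support start block zero" fields in yaffs_DeviceStruct. A sketch of the two directions of that translation (the offsets are fixed at initialisation; names match the struct fields defined earlier):

/* Internal numbering -> numbering the NAND driver callbacks see. */
static int to_driver_block(const yaffs_Device *dev, int internal_block)
{
    return internal_block - dev->blockOffset;
}

/* Driver numbering -> yaffs' internal numbering. */
static int to_internal_block(const yaffs_Device *dev, int driver_block)
{
    return driver_block + dev->blockOffset;
}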
+ */ + +/* Interface to emulated NAND functions (2k page size) */ + +#ifndef __YAFFS_NANDEMUL2K_H__ +#define __YAFFS_NANDEMUL2K_H__ + +#include "yaffs_guts.h" + +int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev, + int chunkInNAND, const __u8 *data, + const yaffs_ExtendedTags *tags); +int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev, + int chunkInNAND, __u8 *data, + yaffs_ExtendedTags *tags); +int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo); +int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo, + yaffs_BlockState *state, __u32 *sequenceNumber); +int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev, + int blockInNAND); +int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev); +int nandemul2k_GetBytesPerChunk(void); +int nandemul2k_GetChunksPerBlock(void); +int nandemul2k_GetNumberOfBlocks(void); + +#endif diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c new file mode 100644 index 000000000000..b2160c08b60f --- /dev/null +++ b/fs/yaffs2/yaffs_packedtags1.c @@ -0,0 +1,50 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "yaffs_packedtags1.h" +#include "yportenv.h" + +void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t) +{ + pt->chunkId = t->chunkId; + pt->serialNumber = t->serialNumber; + pt->byteCount = t->byteCount; + pt->objectId = t->objectId; + pt->ecc = 0; + pt->deleted = (t->chunkDeleted) ? 0 : 1; + pt->unusedStuff = 0; + pt->shouldBeFF = 0xFFFFFFFF; + +} + +void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt) +{ + static const __u8 allFF[] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff }; + + if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) { + t->blockBad = 0; + if (pt->shouldBeFF != 0xFFFFFFFF) + t->blockBad = 1; + t->chunkUsed = 1; + t->objectId = pt->objectId; + t->chunkId = pt->chunkId; + t->byteCount = pt->byteCount; + t->eccResult = YAFFS_ECC_RESULT_NO_ERROR; + t->chunkDeleted = (pt->deleted) ? 0 : 1; + t->serialNumber = pt->serialNumber; + } else { + memset(t, 0, sizeof(yaffs_ExtendedTags)); + } +} diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h new file mode 100644 index 000000000000..b9cf1a8bfbc1 --- /dev/null +++ b/fs/yaffs2/yaffs_packedtags1.h @@ -0,0 +1,37 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +/* This is used to pack YAFFS1 tags, not YAFFS2 tags. 
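+ * (For reference: the bitfields in yaffs_PackedTags1 below pack into 8 bytes,
+ * and with the 4-byte shouldBeFF word the whole structure is 12 bytes, which is
+ * why yaffs_UnpackTags1() compares against a 12-byte all-0xFF buffer. That is
+ * small enough, presumably by design, for the 16-byte spare area of
+ * 512-byte-page NAND.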
*/ + +#ifndef __YAFFS_PACKEDTAGS1_H__ +#define __YAFFS_PACKEDTAGS1_H__ + +#include "yaffs_guts.h" + +typedef struct { + unsigned chunkId:20; + unsigned serialNumber:2; + unsigned byteCount:10; + unsigned objectId:18; + unsigned ecc:12; + unsigned deleted:1; + unsigned unusedStuff:1; + unsigned shouldBeFF; + +} yaffs_PackedTags1; + +void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t); +void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt); +#endif diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c new file mode 100644 index 000000000000..f8ae94ccddb4 --- /dev/null +++ b/fs/yaffs2/yaffs_packedtags2.c @@ -0,0 +1,198 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "yaffs_packedtags2.h" +#include "yportenv.h" +#include "yaffs_trace.h" +#include "yaffs_tagsvalidity.h" + +/* This code packs a set of extended tags into a binary structure for + * NAND storage + */ + +/* Some of the information is "extra" struff which can be packed in to + * speed scanning + * This is defined by having the EXTRA_HEADER_INFO_FLAG set. + */ + +/* Extra flags applied to chunkId */ + +#define EXTRA_HEADER_INFO_FLAG 0x80000000 +#define EXTRA_SHRINK_FLAG 0x40000000 +#define EXTRA_SHADOWS_FLAG 0x20000000 +#define EXTRA_SPARE_FLAGS 0x10000000 + +#define ALL_EXTRA_FLAGS 0xF0000000 + +/* Also, the top 4 bits of the object Id are set to the object type. 
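+ * That is, when a header chunk is packed (chunkId == 0 with extra header info),
+ * the object's yaffs_ObjectType lands in objectId bits 28..31 via
+ * EXTRA_OBJECT_TYPE_SHIFT/MASK below, so a scan can classify the object
+ * without reading the chunk data.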
*/ +#define EXTRA_OBJECT_TYPE_SHIFT (28) +#define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT) + + +static void yaffs_DumpPackedTags2TagsPart(const yaffs_PackedTags2TagsPart *ptt) +{ + T(YAFFS_TRACE_MTD, + (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR), + ptt->objectId, ptt->chunkId, ptt->byteCount, + ptt->sequenceNumber)); +} +static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt) +{ + yaffs_DumpPackedTags2TagsPart(&pt->t); +} + +static void yaffs_DumpTags2(const yaffs_ExtendedTags *t) +{ + T(YAFFS_TRACE_MTD, + (TSTR + ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d" + TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId, + t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber, + t->sequenceNumber)); + +} + +void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt, + const yaffs_ExtendedTags *t) +{ + ptt->chunkId = t->chunkId; + ptt->sequenceNumber = t->sequenceNumber; + ptt->byteCount = t->byteCount; + ptt->objectId = t->objectId; + + if (t->chunkId == 0 && t->extraHeaderInfoAvailable) { + /* Store the extra header info instead */ + /* We save the parent object in the chunkId */ + ptt->chunkId = EXTRA_HEADER_INFO_FLAG + | t->extraParentObjectId; + if (t->extraIsShrinkHeader) + ptt->chunkId |= EXTRA_SHRINK_FLAG; + if (t->extraShadows) + ptt->chunkId |= EXTRA_SHADOWS_FLAG; + + ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK; + ptt->objectId |= + (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT); + + if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) + ptt->byteCount = t->extraEquivalentObjectId; + else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE) + ptt->byteCount = t->extraFileLength; + else + ptt->byteCount = 0; + } + + yaffs_DumpPackedTags2TagsPart(ptt); + yaffs_DumpTags2(t); +} + + +void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t, int tagsECC) +{ + yaffs_PackTags2TagsPart(&pt->t, t); + + if(tagsECC) + yaffs_ECCCalculateOther((unsigned char *)&pt->t, + sizeof(yaffs_PackedTags2TagsPart), + &pt->ecc); +} + + +void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, + yaffs_PackedTags2TagsPart *ptt) +{ + + memset(t, 0, sizeof(yaffs_ExtendedTags)); + + yaffs_InitialiseTags(t); + + if (ptt->sequenceNumber != 0xFFFFFFFF) { + t->blockBad = 0; + t->chunkUsed = 1; + t->objectId = ptt->objectId; + t->chunkId = ptt->chunkId; + t->byteCount = ptt->byteCount; + t->chunkDeleted = 0; + t->serialNumber = 0; + t->sequenceNumber = ptt->sequenceNumber; + + /* Do extra header info stuff */ + + if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) { + t->chunkId = 0; + t->byteCount = 0; + + t->extraHeaderInfoAvailable = 1; + t->extraParentObjectId = + ptt->chunkId & (~(ALL_EXTRA_FLAGS)); + t->extraIsShrinkHeader = + (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0; + t->extraShadows = + (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 
1 : 0; + t->extraObjectType = + ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT; + t->objectId &= ~EXTRA_OBJECT_TYPE_MASK; + + if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) + t->extraEquivalentObjectId = ptt->byteCount; + else + t->extraFileLength = ptt->byteCount; + } + } + + yaffs_DumpPackedTags2TagsPart(ptt); + yaffs_DumpTags2(t); + +} + + +void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt, int tagsECC) +{ + + yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR; + + if (pt->t.sequenceNumber != 0xFFFFFFFF && + tagsECC){ + /* Chunk is in use and we need to do ECC */ + + yaffs_ECCOther ecc; + int result; + yaffs_ECCCalculateOther((unsigned char *)&pt->t, + sizeof(yaffs_PackedTags2TagsPart), + &ecc); + result = yaffs_ECCCorrectOther((unsigned char *)&pt->t, + sizeof(yaffs_PackedTags2TagsPart), + &pt->ecc, &ecc); + switch (result) { + case 0: + eccResult = YAFFS_ECC_RESULT_NO_ERROR; + break; + case 1: + eccResult = YAFFS_ECC_RESULT_FIXED; + break; + case -1: + eccResult = YAFFS_ECC_RESULT_UNFIXED; + break; + default: + eccResult = YAFFS_ECC_RESULT_UNKNOWN; + } + } + + yaffs_UnpackTags2TagsPart(t, &pt->t); + + t->eccResult = eccResult; + + yaffs_DumpPackedTags2(pt); + yaffs_DumpTags2(t); +} + diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h new file mode 100644 index 000000000000..c132a98cdc20 --- /dev/null +++ b/fs/yaffs2/yaffs_packedtags2.h @@ -0,0 +1,43 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +/* This is used to pack YAFFS2 tags, not YAFFS1tags. */ + +#ifndef __YAFFS_PACKEDTAGS2_H__ +#define __YAFFS_PACKEDTAGS2_H__ + +#include "yaffs_guts.h" +#include "yaffs_ecc.h" + +typedef struct { + unsigned sequenceNumber; + unsigned objectId; + unsigned chunkId; + unsigned byteCount; +} yaffs_PackedTags2TagsPart; + +typedef struct { + yaffs_PackedTags2TagsPart t; + yaffs_ECCOther ecc; +} yaffs_PackedTags2; + +/* Full packed tags with ECC, used for oob tags */ +void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t, int tagsECC); +void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt, int tagsECC); + +/* Only the tags part (no ECC for use with inband tags */ +void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t); +void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt); +#endif diff --git a/fs/yaffs2/yaffs_qsort.c b/fs/yaffs2/yaffs_qsort.c new file mode 100644 index 000000000000..187519fbdb55 --- /dev/null +++ b/fs/yaffs2/yaffs_qsort.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "yportenv.h" +/* #include <linux/string.h> */ + +/* + * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". + */ +#define swapcode(TYPE, parmi, parmj, n) do { \ + long i = (n) / sizeof (TYPE); \ + register TYPE *pi = (TYPE *) (parmi); \ + register TYPE *pj = (TYPE *) (parmj); \ + do { \ + register TYPE t = *pi; \ + *pi++ = *pj; \ + *pj++ = t; \ + } while (--i > 0); \ +} while (0) + +#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \ + es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1; + +static __inline void +swapfunc(char *a, char *b, int n, int swaptype) +{ + if (swaptype <= 1) + swapcode(long, a, b, n); + else + swapcode(char, a, b, n); +} + +#define yswap(a, b) do { \ + if (swaptype == 0) { \ + long t = *(long *)(a); \ + *(long *)(a) = *(long *)(b); \ + *(long *)(b) = t; \ + } else \ + swapfunc(a, b, es, swaptype); \ +} while (0) + +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) + +static __inline char * +med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *)) +{ + return cmp(a, b) < 0 ? + (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a)) + : (cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c)); +} + +#ifndef min +#define min(a, b) (((a) < (b)) ? 
(a) : (b)) +#endif + +void +yaffs_qsort(void *aa, size_t n, size_t es, + int (*cmp)(const void *, const void *)) +{ + char *pa, *pb, *pc, *pd, *pl, *pm, *pn; + int d, r, swaptype, swap_cnt; + register char *a = aa; + +loop: SWAPINIT(a, es); + swap_cnt = 0; + if (n < 7) { + for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) + for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; + pl -= es) + yswap(pl, pl - es); + return; + } + pm = (char *)a + (n / 2) * es; + if (n > 7) { + pl = (char *)a; + pn = (char *)a + (n - 1) * es; + if (n > 40) { + d = (n / 8) * es; + pl = med3(pl, pl + d, pl + 2 * d, cmp); + pm = med3(pm - d, pm, pm + d, cmp); + pn = med3(pn - 2 * d, pn - d, pn, cmp); + } + pm = med3(pl, pm, pn, cmp); + } + yswap(a, pm); + pa = pb = (char *)a + es; + + pc = pd = (char *)a + (n - 1) * es; + for (;;) { + while (pb <= pc && (r = cmp(pb, a)) <= 0) { + if (r == 0) { + swap_cnt = 1; + yswap(pa, pb); + pa += es; + } + pb += es; + } + while (pb <= pc && (r = cmp(pc, a)) >= 0) { + if (r == 0) { + swap_cnt = 1; + yswap(pc, pd); + pd -= es; + } + pc -= es; + } + if (pb > pc) + break; + yswap(pb, pc); + swap_cnt = 1; + pb += es; + pc -= es; + } + if (swap_cnt == 0) { /* Switch to insertion sort */ + for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es) + for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; + pl -= es) + yswap(pl, pl - es); + return; + } + + pn = (char *)a + n * es; + r = min(pa - (char *)a, pb - pa); + vecswap(a, pb - r, r); + r = min((long)(pd - pc), (long)(pn - pd - es)); + vecswap(pb, pn - r, r); + r = pb - pa; + if (r > es) + yaffs_qsort(a, r / es, es, cmp); + r = pd - pc; + if (r > es) { + /* Iterate rather than recurse to save stack space */ + a = pn - r; + n = r / es; + goto loop; + } +/* yaffs_qsort(pn - r, r / es, es, cmp);*/ +} diff --git a/fs/yaffs2/yaffs_qsort.h b/fs/yaffs2/yaffs_qsort.h new file mode 100644 index 000000000000..a24d58e37583 --- /dev/null +++ b/fs/yaffs2/yaffs_qsort.h @@ -0,0 +1,23 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + + +#ifndef __YAFFS_QSORT_H__ +#define __YAFFS_QSORT_H__ + +extern void yaffs_qsort(void *const base, size_t total_elems, size_t size, + int (*cmp)(const void *, const void *)); + +#endif diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c new file mode 100644 index 000000000000..e9ac51d7f94b --- /dev/null +++ b/fs/yaffs2/yaffs_tagscompat.c @@ -0,0 +1,539 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "yaffs_guts.h" +#include "yaffs_tagscompat.h" +#include "yaffs_ecc.h" +#include "yaffs_getblockinfo.h" +#include "yaffs_trace.h" + +static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND); +#ifdef NOTYET +static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND); +static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_Spare *spare); +static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND, + const yaffs_Spare *spare); +static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND); +#endif + +static const char yaffs_countBitsTable[256] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 +}; + +int yaffs_CountBits(__u8 x) +{ + int retVal; + retVal = yaffs_countBitsTable[x]; + return retVal; +} + +/********** Tags ECC calculations *********/ + +void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare) +{ + yaffs_ECCCalculate(data, spare->ecc1); + yaffs_ECCCalculate(&data[256], spare->ecc2); +} + +void yaffs_CalcTagsECC(yaffs_Tags *tags) +{ + /* Calculate an ecc */ + + unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes; + unsigned i, j; + unsigned ecc = 0; + unsigned bit = 0; + + tags->ecc = 0; + + for (i = 0; i < 8; i++) { + for (j = 1; j & 0xff; j <<= 1) { + bit++; + if (b[i] & j) + ecc ^= bit; + } + } + + tags->ecc = ecc; + +} + +int yaffs_CheckECCOnTags(yaffs_Tags *tags) +{ + unsigned ecc = tags->ecc; + + yaffs_CalcTagsECC(tags); + + ecc ^= tags->ecc; + + if (ecc && ecc <= 64) { + /* TODO: Handle the failure better. Retire? 
*/ + unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes; + + ecc--; + + b[ecc / 8] ^= (1 << (ecc & 7)); + + /* Now recvalc the ecc */ + yaffs_CalcTagsECC(tags); + + return 1; /* recovered error */ + } else if (ecc) { + /* Wierd ecc failure value */ + /* TODO Need to do somethiong here */ + return -1; /* unrecovered error */ + } + + return 0; +} + +/********** Tags **********/ + +static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr, + yaffs_Tags *tagsPtr) +{ + yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr; + + yaffs_CalcTagsECC(tagsPtr); + + sparePtr->tagByte0 = tu->asBytes[0]; + sparePtr->tagByte1 = tu->asBytes[1]; + sparePtr->tagByte2 = tu->asBytes[2]; + sparePtr->tagByte3 = tu->asBytes[3]; + sparePtr->tagByte4 = tu->asBytes[4]; + sparePtr->tagByte5 = tu->asBytes[5]; + sparePtr->tagByte6 = tu->asBytes[6]; + sparePtr->tagByte7 = tu->asBytes[7]; +} + +static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr, + yaffs_Tags *tagsPtr) +{ + yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr; + int result; + + tu->asBytes[0] = sparePtr->tagByte0; + tu->asBytes[1] = sparePtr->tagByte1; + tu->asBytes[2] = sparePtr->tagByte2; + tu->asBytes[3] = sparePtr->tagByte3; + tu->asBytes[4] = sparePtr->tagByte4; + tu->asBytes[5] = sparePtr->tagByte5; + tu->asBytes[6] = sparePtr->tagByte6; + tu->asBytes[7] = sparePtr->tagByte7; + + result = yaffs_CheckECCOnTags(tagsPtr); + if (result > 0) + dev->tagsEccFixed++; + else if (result < 0) + dev->tagsEccUnfixed++; +} + +static void yaffs_SpareInitialise(yaffs_Spare *spare) +{ + memset(spare, 0xFF, sizeof(yaffs_Spare)); +} + +static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev, + int chunkInNAND, const __u8 *data, + yaffs_Spare *spare) +{ + if (chunkInNAND < dev->param.startBlock * dev->param.nChunksPerBlock) { + T(YAFFS_TRACE_ERROR, + (TSTR("**>> yaffs chunk %d is not valid" TENDSTR), + chunkInNAND)); + return YAFFS_FAIL; + } + + return dev->param.writeChunkToNAND(dev, chunkInNAND, data, spare); +} + +static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev, + int chunkInNAND, + __u8 *data, + yaffs_Spare *spare, + yaffs_ECCResult *eccResult, + int doErrorCorrection) +{ + int retVal; + yaffs_Spare localSpare; + + if (!spare && data) { + /* If we don't have a real spare, then we use a local one. 
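+ * (The caller may pass data without a spare buffer, but the ECC correction
+ * path below still needs somewhere to read the stored ecc1/ecc2 bytes into.)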
*/ + /* Need this for the calculation of the ecc */ + spare = &localSpare; + } + + if (!dev->param.useNANDECC) { + retVal = dev->param.readChunkFromNAND(dev, chunkInNAND, data, spare); + if (data && doErrorCorrection) { + /* Do ECC correction */ + /* Todo handle any errors */ + int eccResult1, eccResult2; + __u8 calcEcc[3]; + + yaffs_ECCCalculate(data, calcEcc); + eccResult1 = + yaffs_ECCCorrect(data, spare->ecc1, calcEcc); + yaffs_ECCCalculate(&data[256], calcEcc); + eccResult2 = + yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc); + + if (eccResult1 > 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>yaffs ecc error fix performed on chunk %d:0" + TENDSTR), chunkInNAND)); + dev->eccFixed++; + } else if (eccResult1 < 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>yaffs ecc error unfixed on chunk %d:0" + TENDSTR), chunkInNAND)); + dev->eccUnfixed++; + } + + if (eccResult2 > 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>yaffs ecc error fix performed on chunk %d:1" + TENDSTR), chunkInNAND)); + dev->eccFixed++; + } else if (eccResult2 < 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>yaffs ecc error unfixed on chunk %d:1" + TENDSTR), chunkInNAND)); + dev->eccUnfixed++; + } + + if (eccResult1 || eccResult2) { + /* We had a data problem on this page */ + yaffs_HandleReadDataError(dev, chunkInNAND); + } + + if (eccResult1 < 0 || eccResult2 < 0) + *eccResult = YAFFS_ECC_RESULT_UNFIXED; + else if (eccResult1 > 0 || eccResult2 > 0) + *eccResult = YAFFS_ECC_RESULT_FIXED; + else + *eccResult = YAFFS_ECC_RESULT_NO_ERROR; + } + } else { + /* Must allocate enough memory for spare+2*sizeof(int) */ + /* for ecc results from device. */ + struct yaffs_NANDSpare nspare; + + memset(&nspare, 0, sizeof(nspare)); + + retVal = dev->param.readChunkFromNAND(dev, chunkInNAND, data, + (yaffs_Spare *) &nspare); + memcpy(spare, &nspare, sizeof(yaffs_Spare)); + if (data && doErrorCorrection) { + if (nspare.eccres1 > 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>mtd ecc error fix performed on chunk %d:0" + TENDSTR), chunkInNAND)); + } else if (nspare.eccres1 < 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>mtd ecc error unfixed on chunk %d:0" + TENDSTR), chunkInNAND)); + } + + if (nspare.eccres2 > 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>mtd ecc error fix performed on chunk %d:1" + TENDSTR), chunkInNAND)); + } else if (nspare.eccres2 < 0) { + T(YAFFS_TRACE_ERROR, + (TSTR + ("**>>mtd ecc error unfixed on chunk %d:1" + TENDSTR), chunkInNAND)); + } + + if (nspare.eccres1 || nspare.eccres2) { + /* We had a data problem on this page */ + yaffs_HandleReadDataError(dev, chunkInNAND); + } + + if (nspare.eccres1 < 0 || nspare.eccres2 < 0) + *eccResult = YAFFS_ECC_RESULT_UNFIXED; + else if (nspare.eccres1 > 0 || nspare.eccres2 > 0) + *eccResult = YAFFS_ECC_RESULT_FIXED; + else + *eccResult = YAFFS_ECC_RESULT_NO_ERROR; + + } + } + return retVal; +} + +#ifdef NOTYET +static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev, + int chunkInNAND) +{ + static int init; + static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK]; + static __u8 data[YAFFS_BYTES_PER_CHUNK]; + /* Might as well always allocate the larger size for */ + /* dev->param.useNANDECC == true; */ + static __u8 spare[sizeof(struct yaffs_NANDSpare)]; + + dev->param.readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare); + + if (!init) { + memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK); + init = 1; + } + + if (memcmp(cmpbuf, data, YAFFS_BYTES_PER_CHUNK)) + return YAFFS_FAIL; + if (memcmp(cmpbuf, spare, 16)) + return YAFFS_FAIL; + + return YAFFS_OK; + +} +#endif + +/* + * Functions for 
robustisizing + */ + +static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND) +{ + int blockInNAND = chunkInNAND / dev->param.nChunksPerBlock; + + /* Mark the block for retirement */ + yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1; + T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, + (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND)); + + /* TODO: + * Just do a garbage collection on the affected block + * then retire the block + * NB recursion + */ +} + +#ifdef NOTYET +static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND) +{ +} + +static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND, + const __u8 *data, + const yaffs_Spare *spare) +{ +} + +static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND, + const yaffs_Spare *spare) +{ +} + +static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND) +{ + int blockInNAND = chunkInNAND / dev->param.nChunksPerBlock; + + /* Mark the block for retirement */ + yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1; + /* Delete the chunk */ + yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__); +} + +static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1, + const yaffs_Spare *s0, const yaffs_Spare *s1) +{ + + if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 || + s0->tagByte0 != s1->tagByte0 || + s0->tagByte1 != s1->tagByte1 || + s0->tagByte2 != s1->tagByte2 || + s0->tagByte3 != s1->tagByte3 || + s0->tagByte4 != s1->tagByte4 || + s0->tagByte5 != s1->tagByte5 || + s0->tagByte6 != s1->tagByte6 || + s0->tagByte7 != s1->tagByte7 || + s0->ecc1[0] != s1->ecc1[0] || + s0->ecc1[1] != s1->ecc1[1] || + s0->ecc1[2] != s1->ecc1[2] || + s0->ecc2[0] != s1->ecc2[0] || + s0->ecc2[1] != s1->ecc2[1] || s0->ecc2[2] != s1->ecc2[2]) { + return 0; + } + + return 1; +} +#endif /* NOTYET */ + +int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev, + int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *eTags) +{ + yaffs_Spare spare; + yaffs_Tags tags; + + yaffs_SpareInitialise(&spare); + + if (eTags->chunkDeleted) + spare.pageStatus = 0; + else { + tags.objectId = eTags->objectId; + tags.chunkId = eTags->chunkId; + + tags.byteCountLSB = eTags->byteCount & 0x3ff; + + if (dev->nDataBytesPerChunk >= 1024) + tags.byteCountMSB = (eTags->byteCount >> 10) & 3; + else + tags.byteCountMSB = 3; + + + tags.serialNumber = eTags->serialNumber; + + if (!dev->param.useNANDECC && data) + yaffs_CalcECC(data, &spare); + + yaffs_LoadTagsIntoSpare(&spare, &tags); + + } + + return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare); +} + +int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev, + int chunkInNAND, + __u8 *data, + yaffs_ExtendedTags *eTags) +{ + + yaffs_Spare spare; + yaffs_Tags tags; + yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN; + + static yaffs_Spare spareFF; + static int init; + + if (!init) { + memset(&spareFF, 0xFF, sizeof(spareFF)); + init = 1; + } + + if (yaffs_ReadChunkFromNAND + (dev, chunkInNAND, data, &spare, &eccResult, 1)) { + /* eTags may be NULL */ + if (eTags) { + + int deleted = + (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0; + + eTags->chunkDeleted = deleted; + eTags->eccResult = eccResult; + eTags->blockBad = 0; /* We're reading it */ + /* therefore it is not a bad block */ + eTags->chunkUsed = + (memcmp(&spareFF, &spare, sizeof(spareFF)) != + 0) ? 
1 : 0; + + if (eTags->chunkUsed) { + yaffs_GetTagsFromSpare(dev, &spare, &tags); + + eTags->objectId = tags.objectId; + eTags->chunkId = tags.chunkId; + eTags->byteCount = tags.byteCountLSB; + + if (dev->nDataBytesPerChunk >= 1024) + eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10); + + eTags->serialNumber = tags.serialNumber; + } + } + + return YAFFS_OK; + } else { + return YAFFS_FAIL; + } +} + +int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev, + int blockInNAND) +{ + + yaffs_Spare spare; + + memset(&spare, 0xff, sizeof(yaffs_Spare)); + + spare.blockStatus = 'Y'; + + yaffs_WriteChunkToNAND(dev, blockInNAND * dev->param.nChunksPerBlock, NULL, + &spare); + yaffs_WriteChunkToNAND(dev, blockInNAND * dev->param.nChunksPerBlock + 1, + NULL, &spare); + + return YAFFS_OK; + +} + +int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev, + int blockNo, + yaffs_BlockState *state, + __u32 *sequenceNumber) +{ + + yaffs_Spare spare0, spare1; + static yaffs_Spare spareFF; + static int init; + yaffs_ECCResult dummy; + + if (!init) { + memset(&spareFF, 0xFF, sizeof(spareFF)); + init = 1; + } + + *sequenceNumber = 0; + + yaffs_ReadChunkFromNAND(dev, blockNo * dev->param.nChunksPerBlock, NULL, + &spare0, &dummy, 1); + yaffs_ReadChunkFromNAND(dev, blockNo * dev->param.nChunksPerBlock + 1, NULL, + &spare1, &dummy, 1); + + if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7) + *state = YAFFS_BLOCK_STATE_DEAD; + else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0) + *state = YAFFS_BLOCK_STATE_EMPTY; + else + *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; + + return YAFFS_OK; +} diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h new file mode 100644 index 000000000000..8c663db526cf --- /dev/null +++ b/fs/yaffs2/yaffs_tagscompat.h @@ -0,0 +1,39 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFS_TAGSCOMPAT_H__ +#define __YAFFS_TAGSCOMPAT_H__ + +#include "yaffs_guts.h" +int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev, + int chunkInNAND, + const __u8 *data, + const yaffs_ExtendedTags *tags); +int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev, + int chunkInNAND, + __u8 *data, + yaffs_ExtendedTags *tags); +int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev, + int blockNo); +int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev, + int blockNo, + yaffs_BlockState *state, + __u32 *sequenceNumber); + +void yaffs_CalcTagsECC(yaffs_Tags *tags); +int yaffs_CheckECCOnTags(yaffs_Tags *tags); +int yaffs_CountBits(__u8 byte); + +#endif diff --git a/fs/yaffs2/yaffs_tagsvalidity.c b/fs/yaffs2/yaffs_tagsvalidity.c new file mode 100644 index 000000000000..93ad08c1f5de --- /dev/null +++ b/fs/yaffs2/yaffs_tagsvalidity.c @@ -0,0 +1,28 @@ +/* + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. 
+ * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "yaffs_tagsvalidity.h" + +void yaffs_InitialiseTags(yaffs_ExtendedTags *tags) +{ + memset(tags, 0, sizeof(yaffs_ExtendedTags)); + tags->validMarker0 = 0xAAAAAAAA; + tags->validMarker1 = 0x55555555; +} + +int yaffs_ValidateTags(yaffs_ExtendedTags *tags) +{ + return (tags->validMarker0 == 0xAAAAAAAA && + tags->validMarker1 == 0x55555555); + +} diff --git a/fs/yaffs2/yaffs_tagsvalidity.h b/fs/yaffs2/yaffs_tagsvalidity.h new file mode 100644 index 000000000000..60a1aea6c5c7 --- /dev/null +++ b/fs/yaffs2/yaffs_tagsvalidity.h @@ -0,0 +1,24 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + + +#ifndef __YAFFS_TAGS_VALIDITY_H__ +#define __YAFFS_TAGS_VALIDITY_H__ + +#include "yaffs_guts.h" + +void yaffs_InitialiseTags(yaffs_ExtendedTags *tags); +int yaffs_ValidateTags(yaffs_ExtendedTags *tags); +#endif diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h new file mode 100644 index 000000000000..8b848edba05e --- /dev/null +++ b/fs/yaffs2/yaffs_trace.h @@ -0,0 +1,60 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + + +#ifndef __YTRACE_H__ +#define __YTRACE_H__ + +extern unsigned int yaffs_traceMask; +extern unsigned int yaffs_wr_attempts; + +/* + * Tracing flags. + * The flags masked in YAFFS_TRACE_ALWAYS are always traced. 
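+ * For example, a debug build might set yaffs_traceMask to
+ * (YAFFS_TRACE_SCAN | YAFFS_TRACE_BAD_BLOCKS), a hypothetical combination;
+ * the T() macro at the end of this header only emits a message when its mask
+ * intersects (yaffs_traceMask | YAFFS_TRACE_ALWAYS).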
+ */ + +#define YAFFS_TRACE_OS 0x00000002 +#define YAFFS_TRACE_ALLOCATE 0x00000004 +#define YAFFS_TRACE_SCAN 0x00000008 +#define YAFFS_TRACE_BAD_BLOCKS 0x00000010 +#define YAFFS_TRACE_ERASE 0x00000020 +#define YAFFS_TRACE_GC 0x00000040 +#define YAFFS_TRACE_WRITE 0x00000080 +#define YAFFS_TRACE_TRACING 0x00000100 +#define YAFFS_TRACE_DELETION 0x00000200 +#define YAFFS_TRACE_BUFFERS 0x00000400 +#define YAFFS_TRACE_NANDACCESS 0x00000800 +#define YAFFS_TRACE_GC_DETAIL 0x00001000 +#define YAFFS_TRACE_SCAN_DEBUG 0x00002000 +#define YAFFS_TRACE_MTD 0x00004000 +#define YAFFS_TRACE_CHECKPOINT 0x00008000 + +#define YAFFS_TRACE_VERIFY 0x00010000 +#define YAFFS_TRACE_VERIFY_NAND 0x00020000 +#define YAFFS_TRACE_VERIFY_FULL 0x00040000 +#define YAFFS_TRACE_VERIFY_ALL 0x000F0000 + +#define YAFFS_TRACE_SYNC 0x00100000 +#define YAFFS_TRACE_BACKGROUND 0x00200000 +#define YAFFS_TRACE_LOCK 0x00400000 + +#define YAFFS_TRACE_ERROR 0x40000000 +#define YAFFS_TRACE_BUG 0x80000000 +#define YAFFS_TRACE_ALWAYS 0xF0000000 + + +#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0) + +#endif diff --git a/fs/yaffs2/yaffsinterface.h b/fs/yaffs2/yaffsinterface.h new file mode 100644 index 000000000000..ca36cfc427a2 --- /dev/null +++ b/fs/yaffs2/yaffsinterface.h @@ -0,0 +1,21 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + +#ifndef __YAFFSINTERFACE_H__ +#define __YAFFSINTERFACE_H__ + +int yaffs_Initialise(unsigned nBlocks); + +#endif diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h new file mode 100644 index 000000000000..13cc1eb62178 --- /dev/null +++ b/fs/yaffs2/yportenv.h @@ -0,0 +1,169 @@ +/* + * YAFFS: Yet another Flash File System . A NAND-flash specific file system. + * + * Copyright (C) 2002-2010 Aleph One Ltd. + * for Toby Churchill Ltd and Brightstar Engineering + * + * Created by Charles Manning <charles@aleph1.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + * + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL. + */ + + +#ifndef __YPORTENV_H__ +#define __YPORTENV_H__ + +/* + * Define the MTD version in terms of Linux Kernel versions + * This allows yaffs to be used independantly of the kernel + * as well as with it. 
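+ * For example, MTD_VERSION(2, 6, 22) evaluates to 0x020616, the same
+ * (major << 16) + (minor << 8) + patch encoding used by KERNEL_VERSION().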
+ */ + +#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) + +#if defined CONFIG_YAFFS_WINCE + +#include "ywinceenv.h" + +#elif defined __KERNEL__ + +#include "moduleconfig.h" + +/* Linux kernel */ + +#include <linux/version.h> +#define MTD_VERSION_CODE LINUX_VERSION_CODE + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +#include <linux/config.h> +#endif +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#define YCHAR char +#define YUCHAR unsigned char +#define _Y(x) x +#define yaffs_strcat(a, b) strcat(a, b) +#define yaffs_strcpy(a, b) strcpy(a, b) +#define yaffs_strncpy(a, b, c) strncpy(a, b, c) +#define yaffs_strncmp(a, b, c) strncmp(a, b, c) +#define yaffs_strnlen(s,m) strnlen(s,m) +#define yaffs_sprintf sprintf +#define yaffs_toupper(a) toupper(a) + +#define Y_INLINE inline + +#define YAFFS_LOSTNFOUND_NAME "lost+found" +#define YAFFS_LOSTNFOUND_PREFIX "obj" + +/* #define YPRINTF(x) printk x */ +#define YMALLOC(x) kmalloc(x, GFP_NOFS) +#define YFREE(x) kfree(x) +#define YMALLOC_ALT(x) vmalloc(x) +#define YFREE_ALT(x) vfree(x) +#define YMALLOC_DMA(x) YMALLOC(x) + +#define YYIELD() schedule() +#define Y_DUMP_STACK() dump_stack() + +#define YAFFS_ROOT_MODE 0755 +#define YAFFS_LOSTNFOUND_MODE 0700 + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +#define Y_CURRENT_TIME CURRENT_TIME.tv_sec +#define Y_TIME_CONVERT(x) (x).tv_sec +#else +#define Y_CURRENT_TIME CURRENT_TIME +#define Y_TIME_CONVERT(x) (x) +#endif + +#define yaffs_SumCompare(x, y) ((x) == (y)) +#define yaffs_strcmp(a, b) strcmp(a, b) + +#define TENDSTR "\n" +#define TSTR(x) KERN_DEBUG x +#define TCONT(x) x +#define TOUT(p) printk p + +#define compile_time_assertion(assertion) \ + ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; }) + +#elif defined CONFIG_YAFFS_DIRECT + +#define MTD_VERSION_CODE MTD_VERSION(2, 6, 22) + +/* Direct interface */ +#include "ydirectenv.h" + +#elif defined CONFIG_YAFFS_UTIL + +/* Stuff for YAFFS utilities */ + +#include "stdlib.h" +#include "stdio.h" +#include "string.h" + +#include "devextras.h" + +#define YMALLOC(x) malloc(x) +#define YFREE(x) free(x) +#define YMALLOC_ALT(x) malloc(x) +#define YFREE_ALT(x) free(x) + +#define YCHAR char +#define YUCHAR unsigned char +#define _Y(x) x +#define yaffs_strcat(a, b) strcat(a, b) +#define yaffs_strcpy(a, b) strcpy(a, b) +#define yaffs_strncpy(a, b, c) strncpy(a, b, c) +#define yaffs_strnlen(s,m) strnlen(s,m) +#define yaffs_sprintf sprintf +#define yaffs_toupper(a) toupper(a) + +#define Y_INLINE inline + +/* #define YINFO(s) YPRINTF(( __FILE__ " %d %s\n",__LINE__,s)) */ +/* #define YALERT(s) YINFO(s) */ + +#define TENDSTR "\n" +#define TSTR(x) x +#define TOUT(p) printf p + +#define YAFFS_LOSTNFOUND_NAME "lost+found" +#define YAFFS_LOSTNFOUND_PREFIX "obj" +/* #define YPRINTF(x) printf x */ + +#define YAFFS_ROOT_MODE 0755 +#define YAFFS_LOSTNFOUND_MODE 0700 + +#define yaffs_SumCompare(x, y) ((x) == (y)) +#define yaffs_strcmp(a, b) strcmp(a, b) + +#else +/* Should have specified a configuration type */ +#error Unknown configuration + +#endif + +#ifndef Y_DUMP_STACK +#define Y_DUMP_STACK() do { } while (0) +#endif + +#ifndef YBUG +#define YBUG() do {\ + T(YAFFS_TRACE_BUG,\ + (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),\ + __LINE__));\ + Y_DUMP_STACK();\ +} while (0) +#endif + +#endif diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h new file mode 100644 index 000000000000..6a7c43260018 --- /dev/null 
+++ b/include/linux/akm8975.h @@ -0,0 +1,87 @@ +/* + * Definitions for akm8975 compass chip. + */ +#ifndef AKM8975_H +#define AKM8975_H + +#include <linux/ioctl.h> + +/*! \name AK8975 operation mode + \anchor AK8975_Mode + Defines an operation mode of the AK8975.*/ +/*! @{*/ +#define AK8975_MODE_SNG_MEASURE 0x01 +#define AK8975_MODE_SELF_TEST 0x08 +#define AK8975_MODE_FUSE_ACCESS 0x0F +#define AK8975_MODE_POWER_DOWN 0x00 +/*! @}*/ + +#define RBUFF_SIZE 8 /* Rx buffer size */ + +/*! \name AK8975 register address +\anchor AK8975_REG +Defines a register address of the AK8975.*/ +/*! @{*/ +#define AK8975_REG_WIA 0x00 +#define AK8975_REG_INFO 0x01 +#define AK8975_REG_ST1 0x02 +#define AK8975_REG_HXL 0x03 +#define AK8975_REG_HXH 0x04 +#define AK8975_REG_HYL 0x05 +#define AK8975_REG_HYH 0x06 +#define AK8975_REG_HZL 0x07 +#define AK8975_REG_HZH 0x08 +#define AK8975_REG_ST2 0x09 +#define AK8975_REG_CNTL 0x0A +#define AK8975_REG_RSV 0x0B +#define AK8975_REG_ASTC 0x0C +#define AK8975_REG_TS1 0x0D +#define AK8975_REG_TS2 0x0E +#define AK8975_REG_I2CDIS 0x0F +/*! @}*/ + +/*! \name AK8975 fuse-rom address +\anchor AK8975_FUSE +Defines a read-only address of the fuse ROM of the AK8975.*/ +/*! @{*/ +#define AK8975_FUSE_ASAX 0x10 +#define AK8975_FUSE_ASAY 0x11 +#define AK8975_FUSE_ASAZ 0x12 +/*! @}*/ + +#define AKMIO 0xA1 + +/* IOCTLs for AKM library */ +#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5]) +#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5]) +#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE]) +#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12]) +#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int) +#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int) +#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short) + +/* IOCTLs for APPs */ +#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short) +#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short) +#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short) +#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short) +#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short) +#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY +/* Set raw magnetic vector flag */ +#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short) +/* Get raw magnetic vector flag */ +#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short) +#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short) + + +struct akm8975_platform_data { + int intr; + + int (*init)(void); + void (*exit)(void); + int (*power_on)(void); + int (*power_off)(void); +}; + +#endif + diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index ca84ce70d5d5..36e3a7cd73e5 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -5,6 +5,15 @@ #define AMBA_MMCI_H #include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> + +struct embedded_sdio_data { + struct sdio_cis cis; + struct sdio_cccr cccr; + struct sdio_embedded_func *funcs; + int num_funcs; +}; /** * struct mmci_platform_data - platform configuration for the MMCI @@ -36,6 +45,9 @@ struct mmci_platform_data { int gpio_wp; int gpio_cd; unsigned long capabilities; + unsigned int status_irq; + struct embedded_sdio_data *embedded_sdio; + int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); }; #endif diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h new file mode 100644 index 000000000000..7f16a14c0fe7 --- /dev/null +++ b/include/linux/android_aid.h @@ -0,0 +1,26 @@ +/* include/linux/android_aid.h + * + * Copyright 
(C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ANDROID_AID_H +#define _LINUX_ANDROID_AID_H + +/* AIDs that the kernel treats differently */ +#define AID_NET_BT_ADMIN 3001 +#define AID_NET_BT 3002 +#define AID_INET 3003 +#define AID_NET_RAW 3004 +#define AID_NET_ADMIN 3005 + +#endif diff --git a/include/linux/android_alarm.h b/include/linux/android_alarm.h new file mode 100644 index 000000000000..f8f14e793dbf --- /dev/null +++ b/include/linux/android_alarm.h @@ -0,0 +1,106 @@ +/* include/linux/android_alarm.h + * + * Copyright (C) 2006-2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ANDROID_ALARM_H +#define _LINUX_ANDROID_ALARM_H + +#include <linux/ioctl.h> +#include <linux/time.h> + +enum android_alarm_type { + /* return code bit numbers or set alarm arg */ + ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME, + + ANDROID_ALARM_TYPE_COUNT, + + /* return code bit numbers */ + /* ANDROID_ALARM_TIME_CHANGE = 16 */ +}; + +#ifdef __KERNEL__ + +#include <linux/ktime.h> +#include <linux/rbtree.h> + +/* + * The alarm interface is similar to the hrtimer interface but adds support + * for wakeup from suspend. It also adds an elapsed realtime clock that can + * be used for periodic timers that need to keep runing while the system is + * suspended and not be disrupted when the wall time is set. + */ + +/** + * struct alarm - the basic alarm structure + * @node: red black tree node for time ordered insertion + * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup. + * @softexpires: the absolute earliest expiry time of the alarm. + * @expires: the absolute expiry time. 
+ * @function: alarm expiry callback function + * + * The alarm structure must be initialized by alarm_init() + * + */ + +struct alarm { + struct rb_node node; + enum android_alarm_type type; + ktime_t softexpires; + ktime_t expires; + void (*function)(struct alarm *); +}; + +void alarm_init(struct alarm *alarm, + enum android_alarm_type type, void (*function)(struct alarm *)); +void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end); +int alarm_try_to_cancel(struct alarm *alarm); +int alarm_cancel(struct alarm *alarm); +ktime_t alarm_get_elapsed_realtime(void); + +/* set rtc while preserving elapsed realtime */ +int alarm_set_rtc(const struct timespec ts); + +#endif + +enum android_alarm_return_flags { + ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = + 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME_MASK = + 1U << ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, + ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 +}; + +/* Disable alarm */ +#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4)) + +/* Ack last alarm and wait for next */ +#define ANDROID_ALARM_WAIT _IO('a', 1) + +#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) +/* Set alarm */ +#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) +#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) +#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) +#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) + +#endif diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h new file mode 100644 index 000000000000..f633621f5be3 --- /dev/null +++ b/include/linux/android_pmem.h @@ -0,0 +1,93 @@ +/* include/linux/android_pmem.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ANDROID_PMEM_H_ +#define _ANDROID_PMEM_H_ + +#define PMEM_IOCTL_MAGIC 'p' +#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) +#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) +#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int) +#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int) +/* This ioctl will allocate pmem space, backing the file, it will fail + * if the file already has an allocation, pass it the len as the argument + * to the ioctl */ +#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int) +/* This will connect a one pmem file to another, pass the file that is already + * backed in memory as the argument to the ioctl + */ +#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) +/* Returns the total size of the pmem region it is sent to as a pmem_region + * struct (with offset set to 0). 
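+ * A minimal usage sketch (hypothetical; fd is assumed to be an open pmem
+ * device file descriptor):
+ *   struct pmem_region r = { 0, 0 };
+ *   ioctl(fd, PMEM_GET_TOTAL_SIZE, &r);
+ * after which r.len holds the region size and r.offset is left at 0.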
+ */ +#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) +#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) + +struct android_pmem_platform_data +{ + const char* name; + /* starting physical address of memory region */ + unsigned long start; + /* size of memory region */ + unsigned long size; + /* set to indicate the region should not be managed with an allocator */ + unsigned no_allocator; + /* set to indicate maps of this region should be cached, if a mix of + * cached and uncached is desired, set this and open the device with + * O_SYNC to get an uncached region */ + unsigned cached; + /* The MSM7k has bits to enable a write buffer in the bus controller*/ + unsigned buffered; +}; + +struct pmem_region { + unsigned long offset; + unsigned long len; +}; + +#ifdef CONFIG_ANDROID_PMEM +int is_pmem_file(struct file *file); +int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, + unsigned long *end, struct file **filp); +int get_pmem_user_addr(struct file *file, unsigned long *start, + unsigned long *end); +void put_pmem_file(struct file* file); +void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); +int pmem_setup(struct android_pmem_platform_data *pdata, + long (*ioctl)(struct file *, unsigned int, unsigned long), + int (*release)(struct inode *, struct file *)); +int pmem_remap(struct pmem_region *region, struct file *file, + unsigned operation); + +#else +static inline int is_pmem_file(struct file *file) { return 0; } +static inline int get_pmem_file(int fd, unsigned long *start, + unsigned long *vstart, unsigned long *end, + struct file **filp) { return -ENOSYS; } +static inline int get_pmem_user_addr(struct file *file, unsigned long *start, + unsigned long *end) { return -ENOSYS; } +static inline void put_pmem_file(struct file* file) { return; } +static inline void flush_pmem_file(struct file *file, unsigned long start, + unsigned long len) { return; } +static inline int pmem_setup(struct android_pmem_platform_data *pdata, + long (*ioctl)(struct file *, unsigned int, unsigned long), + int (*release)(struct inode *, struct file *)) { return -ENOSYS; } + +static inline int pmem_remap(struct pmem_region *region, struct file *file, + unsigned operation) { return -ENOSYS; } +#endif + +#endif //_ANDROID_PPP_H_ + diff --git a/include/linux/ashmem.h b/include/linux/ashmem.h new file mode 100644 index 000000000000..1976b10ef93e --- /dev/null +++ b/include/linux/ashmem.h @@ -0,0 +1,48 @@ +/* + * include/linux/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed. It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _LINUX_ASHMEM_H +#define _LINUX_ASHMEM_H + +#include <linux/limits.h> +#include <linux/ioctl.h> + +#define ASHMEM_NAME_LEN 256 + +#define ASHMEM_NAME_DEF "dev/ashmem" + +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ +#define ASHMEM_NOT_PURGED 0 +#define ASHMEM_WAS_PURGED 1 + +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
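+ * (Typical usage, not spelled out in this header: a cache unpins ranges it can
+ * afford to lose with ASHMEM_UNPIN and, when re-pinning them with ASHMEM_PIN,
+ * checks for ASHMEM_WAS_PURGED, defined above, to know whether the contents
+ * must be regenerated.)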
*/ +#define ASHMEM_IS_UNPINNED 0 +#define ASHMEM_IS_PINNED 1 + +struct ashmem_pin { + __u32 offset; /* offset into region, in bytes, page-aligned */ + __u32 len; /* length forward from offset, in bytes, page-aligned */ +}; + +#define __ASHMEMIOC 0x77 + +#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) +#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) +#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) +#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) +#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) +#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) +#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) +#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) +#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) +#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) + +#endif /* _LINUX_ASHMEM_H */ diff --git a/include/linux/cpuacct.h b/include/linux/cpuacct.h new file mode 100644 index 000000000000..8f68e733fe19 --- /dev/null +++ b/include/linux/cpuacct.h @@ -0,0 +1,43 @@ +/* include/linux/cpuacct.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _CPUACCT_H_ +#define _CPUACCT_H_ + +#include <linux/cgroup.h> + +#ifdef CONFIG_CGROUP_CPUACCT + +/* + * Platform specific CPU frequency hooks for cpuacct. These functions are + * called from the scheduler. + */ +struct cpuacct_charge_calls { + /* + * Platforms can take advantage of this data and use + * per-cpu allocations if necessary. + */ + void (*init) (void **cpuacct_data); + void (*charge) (void *cpuacct_data, u64 cputime, unsigned int cpu); + void (*cpufreq_show) (void *cpuacct_data, struct cgroup_map_cb *cb); + /* Returns power consumed in milliWatt seconds */ + u64 (*power_usage) (void *cpuacct_data); +}; + +int cpuacct_charge_register(struct cpuacct_charge_calls *fn); + +#endif /* CONFIG_CGROUP_CPUACCT */ + +#endif // _CPUACCT_H_ diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h new file mode 100755 index 000000000000..8343b817af31 --- /dev/null +++ b/include/linux/earlysuspend.h @@ -0,0 +1,56 @@ +/* include/linux/earlysuspend.h + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_EARLYSUSPEND_H +#define _LINUX_EARLYSUSPEND_H + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include <linux/list.h> +#endif + +/* The early_suspend structure defines suspend and resume hooks to be called + * when the user visible sleep state of the system changes, and a level to + * control the order. They can be used to turn off the screen and input + * devices that are not used for wakeup. 
+ * Suspend handlers are called in low to high level order, resume handlers are + * called in the opposite order. If, when calling register_early_suspend, + * the suspend handlers have already been called without a matching call to the + * resume handlers, the suspend handler will be called directly from + * register_early_suspend. This direct call can violate the normal level order. + */ +enum { + EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50, + EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100, + EARLY_SUSPEND_LEVEL_DISABLE_FB = 150, +}; +struct early_suspend { +#ifdef CONFIG_HAS_EARLYSUSPEND + struct list_head link; + int level; + void (*suspend)(struct early_suspend *h); + void (*resume)(struct early_suspend *h); +#endif +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +void register_early_suspend(struct early_suspend *handler); +void unregister_early_suspend(struct early_suspend *handler); +#else +#define register_early_suspend(handler) do { } while (0) +#define unregister_early_suspend(handler) do { } while (0) +#endif + +#endif + diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5f2f4c4d8fb0..27bc65f3cef0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -21,6 +21,8 @@ #define disk_to_dev(disk) (&(disk)->part0.__dev) #define part_to_dev(part) (&((part)->__dev)) +#define GENHD_PART_NAME_SIZE 128 + extern struct device_type part_type; extern struct kobject *block_depr; extern struct class block_class; @@ -106,6 +108,7 @@ struct hd_struct { struct disk_stats dkstats; #endif struct rcu_head rcu_head; + char partition_name[GENHD_PART_NAME_SIZE]; }; #define GENHD_FL_REMOVABLE 1 diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h new file mode 100644 index 000000000000..360b4ddb46a6 --- /dev/null +++ b/include/linux/gpio_event.h @@ -0,0 +1,169 @@ +/* include/linux/gpio_event.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_GPIO_EVENT_H +#define _LINUX_GPIO_EVENT_H + +#include <linux/input.h> + +struct gpio_event_input_devs { + int count; + struct input_dev *dev[]; +}; +enum { + GPIO_EVENT_FUNC_UNINIT = 0x0, + GPIO_EVENT_FUNC_INIT = 0x1, + GPIO_EVENT_FUNC_SUSPEND = 0x2, + GPIO_EVENT_FUNC_RESUME = 0x3, +}; +struct gpio_event_info { + int (*func)(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, + void **data, int func); + int (*event)(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, + void **data, unsigned int dev, unsigned int type, + unsigned int code, int value); /* out events */ + bool no_suspend; +}; + +struct gpio_event_platform_data { + const char *name; + struct gpio_event_info **info; + size_t info_count; + int (*power)(const struct gpio_event_platform_data *pdata, bool on); + const char *names[]; /* If name is NULL, names contain a NULL */ + /* terminated list of input devices to create */ +}; + +#define GPIO_EVENT_DEV_NAME "gpio-event" + +/* Key matrix */ + +enum gpio_event_matrix_flags { + /* unset: drive active output low, set: drive active output high */ + GPIOKPF_ACTIVE_HIGH = 1U << 0, + GPIOKPF_DEBOUNCE = 1U << 1, + GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2, + GPIOKPF_REMOVE_PHANTOM_KEYS = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS | + GPIOKPF_DEBOUNCE, + GPIOKPF_DRIVE_INACTIVE = 1U << 3, + GPIOKPF_LEVEL_TRIGGERED_IRQ = 1U << 4, + GPIOKPF_PRINT_UNMAPPED_KEYS = 1U << 16, + GPIOKPF_PRINT_MAPPED_KEYS = 1U << 17, + GPIOKPF_PRINT_PHANTOM_KEYS = 1U << 18, +}; + +#define MATRIX_CODE_BITS (10) +#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1) +#define MATRIX_KEY(dev, code) \ + (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK)) + +extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_matrix_info { + /* initialize to gpio_event_matrix_func */ + struct gpio_event_info info; + /* size must be ninputs * noutputs */ + const unsigned short *keymap; + unsigned int *input_gpios; + unsigned int *output_gpios; + unsigned int ninputs; + unsigned int noutputs; + /* time to wait before reading inputs after driving each output */ + ktime_t settle_time; + /* time to wait before scanning the keypad a second time */ + ktime_t debounce_delay; + ktime_t poll_time; + unsigned flags; +}; + +/* Directly connected inputs and outputs */ + +enum gpio_event_direct_flags { + GPIOEDF_ACTIVE_HIGH = 1U << 0, +/* GPIOEDF_USE_DOWN_IRQ = 1U << 1, */ +/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */ + GPIOEDF_PRINT_KEYS = 1U << 8, + GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9, +}; + +struct gpio_event_direct_entry { + uint32_t gpio:16; + uint32_t code:10; + uint32_t dev:6; +}; + +/* inputs */ +extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_input_info { + /* initialize to gpio_event_input_func */ + struct gpio_event_info info; + ktime_t debounce_time; + ktime_t poll_time; + uint16_t flags; + uint16_t type; + const struct gpio_event_direct_entry *keymap; + size_t keymap_size; +}; + +/* outputs */ +extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, + unsigned int dev, unsigned int type, + unsigned int code, int value); +struct gpio_event_output_info { + /* 
initialize to gpio_event_output_func and gpio_event_output_event */ + struct gpio_event_info info; + uint16_t flags; + uint16_t type; + const struct gpio_event_direct_entry *keymap; + size_t keymap_size; +}; + + +/* axes */ + +enum gpio_event_axis_flags { + GPIOEAF_PRINT_UNKNOWN_DIRECTION = 1U << 16, + GPIOEAF_PRINT_RAW = 1U << 17, + GPIOEAF_PRINT_EVENT = 1U << 18, +}; + +extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_axis_info { + /* initialize to gpio_event_axis_func */ + struct gpio_event_info info; + uint8_t count; /* number of gpios for this axis */ + uint8_t dev; /* device index when using multiple input devices */ + uint8_t type; /* EV_REL or EV_ABS */ + uint16_t code; + uint16_t decoded_size; + uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in); + uint32_t *gpio; + uint32_t flags; +}; +#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map +#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map +uint16_t gpio_axis_4bit_gray_map( + struct gpio_event_axis_info *info, uint16_t in); +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in); + +#endif diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h new file mode 100644 index 000000000000..8d827ea66425 --- /dev/null +++ b/include/linux/if_pppolac.h @@ -0,0 +1,35 @@ +/* include/linux/if_pppolac.h + * + * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661) + * + * Copyright (C) 2009 Google, Inc. + * Author: Chia-chi Yeh <chiachi@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_IF_PPPOLAC_H +#define __LINUX_IF_PPPOLAC_H + +#include <linux/socket.h> +#include <linux/types.h> + +#define PX_PROTO_OLAC 2 + +struct sockaddr_pppolac { + sa_family_t sa_family; /* AF_PPPOX */ + unsigned int sa_protocol; /* PX_PROTO_OLAC */ + int udp_socket; + struct __attribute__((packed)) { + __u16 tunnel, session; + } local, remote; +} __attribute__((packed)); + +#endif /* __LINUX_IF_PPPOLAC_H */ diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h new file mode 100644 index 000000000000..305d4c40459a --- /dev/null +++ b/include/linux/if_pppopns.h @@ -0,0 +1,34 @@ +/* include/linux/if_pppopns.h + * + * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637) + * + * Copyright (C) 2009 Google, Inc. + * Author: Chia-chi Yeh <chiachi@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
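Stepping back to the PPPoLAC socket declared in if_pppolac.h above: an illustrative user-space sketch, not part of the patch, of how a VPN daemon would hand its established L2TP tunnel to the kernel. The datagram socket type and the mapping of the negotiated tunnel/session IDs onto the local/remote fields are assumptions based on the structure layout.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_pppox.h>

/* udp_fd: the daemon's connected UDP socket carrying the L2TP tunnel.
 * The four IDs come from the daemon's own L2TP negotiation (assumed). */
static int pppolac_attach(int udp_fd, uint16_t local_tunnel, uint16_t local_session,
			  uint16_t remote_tunnel, uint16_t remote_session)
{
	struct sockaddr_pppolac addr;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_PPPOX;
	addr.sa_protocol = PX_PROTO_OLAC;
	addr.udp_socket = udp_fd;
	addr.local.tunnel = local_tunnel;
	addr.local.session = local_session;
	addr.remote.tunnel = remote_tunnel;
	addr.remote.session = remote_session;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* ready to be attached to pppd as a PPP channel */
}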
+ */ + +#ifndef __LINUX_IF_PPPOPNS_H +#define __LINUX_IF_PPPOPNS_H + +#include <linux/socket.h> +#include <linux/types.h> + +#define PX_PROTO_OPNS 3 + +struct sockaddr_pppopns { + sa_family_t sa_family; /* AF_PPPOX */ + unsigned int sa_protocol; /* PX_PROTO_OPNS */ + int tcp_socket; + __u16 local; + __u16 remote; +} __attribute__((packed)); + +#endif /* __LINUX_IF_PPPOPNS_H */ diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 27741e05446f..03749f41131f 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -27,6 +27,8 @@ #include <linux/ppp_channel.h> #endif /* __KERNEL__ */ #include <linux/if_pppol2tp.h> +#include <linux/if_pppolac.h> +#include <linux/if_pppopns.h> /* For user-space programs to pick up these definitions * which they wouldn't get otherwise without defining __KERNEL__ @@ -51,7 +53,9 @@ struct pppoe_addr{ */ #define PX_PROTO_OE 0 /* Currently just PPPoE */ #define PX_PROTO_OL2TP 1 /* Now L2TP also */ -#define PX_MAX_PROTO 2 +#define PX_PROTO_OLAC 2 +#define PX_PROTO_OPNS 3 +#define PX_MAX_PROTO 4 struct sockaddr_pppox { sa_family_t sa_family; /* address family, AF_PPPOX */ @@ -150,6 +154,22 @@ struct pppoe_opt { relayed to (PPPoE relaying) */ }; +struct pppolac_opt { + __u32 local; + __u32 remote; + __u16 sequence; + __u8 sequencing; + int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb); +}; + +struct pppopns_opt { + __u16 local; + __u16 remote; + __u32 sequence; + void (*data_ready)(struct sock *sk_raw, int length); + int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb); +}; + #include <net/sock.h> struct pppox_sock { @@ -159,6 +179,8 @@ struct pppox_sock { struct pppox_sock *next; /* for hash table */ union { struct pppoe_opt pppoe; + struct pppolac_opt lac; + struct pppopns_opt pns; } proto; __be16 num; }; diff --git a/include/linux/kernel_debugger.h b/include/linux/kernel_debugger.h new file mode 100644 index 000000000000..b4dbfe99d79e --- /dev/null +++ b/include/linux/kernel_debugger.h @@ -0,0 +1,41 @@ +/* + * include/linux/kernel_debugger.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_KERNEL_DEBUGGER_H_ +#define _LINUX_KERNEL_DEBUGGER_H_ + +struct kdbg_ctxt { + int (*printf)(void *cookie, const char *fmt, ...); + void *cookie; +}; + +/* kernel_debugger() is called from IRQ context and should + * use the kdbg_ctxt.printf to write output (do NOT call + * printk, do operations not safe from IRQ context, etc). + * + * kdbg_ctxt.printf will return -1 if there is not enough + * buffer space or if you are being aborted. In this case + * you must return as soon as possible. + * + * Return non-zero if more data is available -- if buffer + * space ran and you had to stop, but could print more, + * for example. + * + * Additional calls where cmd is "more" will be made if + * the additional data is desired. 
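A hypothetical implementation of this hook, purely to show the calling convention spelled out above (a sketch, not the in-tree implementation): it stops as soon as ctxt->printf() reports that the buffer is full and returns non-zero so the core can come back with "more".

#include <linux/kernel.h>
#include <linux/kernel_debugger.h>
#include <linux/string.h>

static unsigned long mydbg_log[64];	/* hypothetical state worth dumping */
static int mydbg_pos;			/* where the previous "more" stopped */

int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd)
{
	int i;

	if (strcmp(cmd, "log") && strcmp(cmd, "more"))
		return 0;
	if (strcmp(cmd, "more"))
		mydbg_pos = 0;		/* a fresh "log" command starts over */
	for (i = mydbg_pos; i < ARRAY_SIZE(mydbg_log); i++) {
		if (ctxt->printf(ctxt->cookie, "%d: %lu\n", i, mydbg_log[i]) < 0) {
			mydbg_pos = i;	/* buffer full or aborted: resume here */
			return 1;	/* more data is available */
		}
	}
	return 0;			/* everything printed */
}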
+ */ +int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd); + +#endif diff --git a/include/linux/keychord.h b/include/linux/keychord.h new file mode 100644 index 000000000000..856a5850217b --- /dev/null +++ b/include/linux/keychord.h @@ -0,0 +1,52 @@ +/* + * Key chord input driver + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#ifndef __LINUX_KEYCHORD_H_ +#define __LINUX_KEYCHORD_H_ + +#include <linux/input.h> + +#define KEYCHORD_VERSION 1 + +/* + * One or more input_keychord structs are written to /dev/keychord + * at once to specify the list of keychords to monitor. + * Reading /dev/keychord returns the id of a keychord when the + * keychord combination is pressed. A keychord is signalled when + * all of the keys in the keycode list are in the pressed state. + * The order in which the keys are pressed does not matter. + * The keychord will not be signalled if keys not in the keycode + * list are pressed. + * Keychords will not be signalled on key release events. + */ +struct input_keychord { + /* should be KEYCHORD_VERSION */ + __u16 version; + /* + * client specified ID, returned from read() + * when this keychord is pressed. + */ + __u16 id; + + /* number of keycodes in this keychord */ + __u16 count; + + /* variable length array of keycodes */ + __u16 keycodes[]; +}; + +#endif /* __LINUX_KEYCHORD_H_ */ diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h new file mode 100644 index 000000000000..50c4b952b8f7 --- /dev/null +++ b/include/linux/keyreset.h @@ -0,0 +1,27 @@ +/* + * include/linux/keyreset.h - platform data structure for resetkeys driver + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
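Returning briefly to the /dev/keychord interface described in keychord.h above: a user-space sketch, not part of the patch, of registering one chord and waiting for it. The volume-down plus power combination, the id value, and the assumption that read() hands back the id as a __u16 are illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/input.h>
#include <linux/keychord.h>

int main(void)
{
	size_t sz = sizeof(struct input_keychord) + 2 * sizeof(__u16);
	struct input_keychord *kc = malloc(sz);
	__u16 id;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0 || !kc)
		return 1;
	kc->version = KEYCHORD_VERSION;
	kc->id = 42;			/* arbitrary id, echoed back on read() */
	kc->count = 2;
	kc->keycodes[0] = KEY_VOLUMEDOWN;
	kc->keycodes[1] = KEY_POWER;
	write(fd, kc, sz);		/* register the chord list */
	while (read(fd, &id, sizeof(id)) == sizeof(id))
		printf("keychord %u pressed\n", id);
	free(kc);
	close(fd);
	return 0;
}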
+ * + */ + +#ifndef _LINUX_KEYRESET_H +#define _LINUX_KEYRESET_H + +#define KEYRESET_NAME "keyreset" + +struct keyreset_platform_data { + int *keys_up; + int keys_down[]; /* 0 terminated */ +}; + +#endif /* _LINUX_KEYRESET_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 74949fbef8c6..7961c748d944 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -736,6 +736,7 @@ extern void show_free_areas(void); int shmem_lock(struct file *file, int lock, struct user_struct *user); struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); +void shmem_set_file(struct vm_area_struct *vma, struct file *file); int shmem_zero_setup(struct vm_area_struct *); #ifndef CONFIG_MMU diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index d9115ba280c5..cdfbe5953482 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -203,6 +203,10 @@ struct mmc_host { const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int bus_refs; /* reference counter */ + unsigned int bus_resume_flags; +#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0) +#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1) + unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; atomic_t sdio_irq_thread_abort; @@ -215,6 +219,15 @@ struct mmc_host { struct dentry *debugfs_root; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + struct { + struct sdio_cis *cis; + struct sdio_cccr *cccr; + struct sdio_embedded_func *funcs; + int num_funcs; + } embedded_sdio_data; +#endif + unsigned long private[0] ____cacheline_aligned; }; @@ -223,6 +236,14 @@ extern int mmc_add_host(struct mmc_host *); extern void mmc_remove_host(struct mmc_host *); extern void mmc_free_host(struct mmc_host *); +#ifdef CONFIG_MMC_EMBEDDED_SDIO +extern void mmc_set_embedded_sdio_data(struct mmc_host *host, + struct sdio_cis *cis, + struct sdio_cccr *cccr, + struct sdio_embedded_func *funcs, + int num_funcs); +#endif + static inline void *mmc_priv(struct mmc_host *host) { return (void *)host->private; @@ -233,6 +254,17 @@ static inline void *mmc_priv(struct mmc_host *host) #define mmc_dev(x) ((x)->parent) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) +#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME) + +static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) +{ + if (manual) + host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME; + else + host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME; +} + +extern int mmc_resume_bus(struct mmc_host *host); extern int mmc_suspend_host(struct mmc_host *); extern int mmc_resume_host(struct mmc_host *); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 5b35987e691b..a5d765c01281 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -267,6 +267,7 @@ struct _mmc_csd { #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ #define EXT_CSD_TRIM_MULT 232 /* RO */ +#define EXT_CSD_BOOT_SIZE_MULTI 226 /* RO */ /* * EXT_CSD field definitions diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h index d37aac49cf9a..390382367acb 100644 --- a/include/linux/mmc/pm.h +++ b/include/linux/mmc/pm.h @@ -26,5 +26,6 @@ typedef unsigned int mmc_pm_flag_t; #define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ #define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ +#define MMC_PM_IGNORE_PM_NOTIFY (1 << 2) /* ignore mmc pm notify */ #endif diff --git 
a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 31baaf82f458..557acae8cf95 100644..100755 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -23,6 +23,14 @@ struct sdio_func; typedef void (sdio_irq_handler_t)(struct sdio_func *); /* + * Structure used to hold embedded SDIO device data from platform layer + */ +struct sdio_embedded_func { + uint8_t f_class; + uint32_t f_maxblksize; +}; + +/* * SDIO function CIS tuple (unknown to the core) */ struct sdio_func_tuple { @@ -130,6 +138,8 @@ extern int sdio_release_irq(struct sdio_func *func); extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret, + unsigned in); extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index 34066e65fdeb..f38d4f0a5ae8 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -101,6 +101,7 @@ struct __fat_dirent { /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) #define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) +#define VFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32) struct fat_boot_sector { __u8 ignored[3]; /* Boot strap short or near jump */ @@ -138,6 +139,17 @@ struct fat_boot_fsinfo { __le32 reserved2[4]; }; +struct fat_boot_bsx { + __u8 drive; /* drive number */ + __u8 reserved1; + __u8 signature; /* extended boot signature */ + __u8 vol_id[4]; /* volume ID */ + __u8 vol_label[11]; /* volume label */ + __u8 type[8]; /* file system type */ +}; +#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */ +#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */ + struct msdos_dir_entry { __u8 name[MSDOS_NAME];/* name and extension */ __u8 attr; /* attribute bits */ diff --git a/include/linux/poll.h b/include/linux/poll.h index 600cc1fde64d..56e76af78102 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -73,6 +73,8 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); +extern long select_estimate_accuracy(struct timespec *tv); + static inline int poll_schedule(struct poll_wqueues *pwq, int state) { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 30083a896f36..f5a8453203f4 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -14,6 +14,7 @@ #define __LINUX_POWER_SUPPLY_H__ #include <linux/device.h> +#include <linux/wakelock.h> #include <linux/workqueue.h> #include <linux/leds.h> @@ -159,6 +160,9 @@ struct power_supply { /* private */ struct device *dev; struct work_struct changed_work; + spinlock_t changed_lock; + bool changed; + struct wake_lock work_wake_lock; #ifdef CONFIG_LEDS_TRIGGERS struct led_trigger *charging_full_trig; diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7dd..6e3dd4fef93c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1073,6 +1073,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*moved_group) (struct task_struct *p, int on_rq); + void (*prep_move_group) (struct task_struct *p, int 
on_rq); #endif }; @@ -1678,6 +1679,9 @@ static inline void put_task_struct(struct task_struct *t) extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); +extern int task_free_register(struct notifier_block *n); +extern int task_free_unregister(struct notifier_block *n); + /* * Per process flags */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 563e23400913..7b8a09e2ec7f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -234,6 +234,7 @@ struct uart_ops { void (*pm)(struct uart_port *, unsigned int state, unsigned int oldstate); int (*set_wake)(struct uart_port *, unsigned int state); + void (*wake_peer)(struct uart_port *); /* * Return a string describing the type of the port diff --git a/include/linux/sockios.h b/include/linux/sockios.h index 241f179347d9..f4fa1a164c74 100644 --- a/include/linux/sockios.h +++ b/include/linux/sockios.h @@ -65,6 +65,7 @@ #define SIOCDIFADDR 0x8936 /* delete PA address */ #define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */ #define SIOCGIFCOUNT 0x8938 /* get number of devices */ +#define SIOCKILLADDR 0x8939 /* kill sockets with this local addr */ #define SIOCGIFBR 0x8940 /* Bridging support */ #define SIOCSIFBR 0x8941 /* Set bridging options */ diff --git a/include/linux/switch.h b/include/linux/switch.h new file mode 100644 index 000000000000..3e4c748e343a --- /dev/null +++ b/include/linux/switch.h @@ -0,0 +1,53 @@ +/* + * Switch class driver + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#ifndef __LINUX_SWITCH_H__ +#define __LINUX_SWITCH_H__ + +struct switch_dev { + const char *name; + struct device *dev; + int index; + int state; + + ssize_t (*print_name)(struct switch_dev *sdev, char *buf); + ssize_t (*print_state)(struct switch_dev *sdev, char *buf); +}; + +struct gpio_switch_platform_data { + const char *name; + unsigned gpio; + + /* if NULL, switch_dev.name will be printed */ + const char *name_on; + const char *name_off; + /* if NULL, "0" or "1" will be printed */ + const char *state_on; + const char *state_off; +}; + +extern int switch_dev_register(struct switch_dev *sdev); +extern void switch_dev_unregister(struct switch_dev *sdev); + +static inline int switch_get_state(struct switch_dev *sdev) +{ + return sdev->state; +} + +extern void switch_set_state(struct switch_dev *sdev, int state); + +#endif /* __LINUX_SWITCH_H__ */ diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h new file mode 100644 index 000000000000..5539cc520779 --- /dev/null +++ b/include/linux/synaptics_i2c_rmi.h @@ -0,0 +1,55 @@ +/* + * include/linux/synaptics_i2c_rmi.h - platform data structure for f75375s sensor + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
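Jumping back to the switch class API in switch.h above, an illustrative headset-detect driver (a sketch, not from the patch): it registers a switch_dev and reports plug state with switch_set_state() from a threaded interrupt handler. The "h2w" name follows Android convention; the IRQ number and the hard-wired state are placeholders.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/switch.h>

#define HEADSET_IRQ	42		/* hypothetical board-specific IRQ */

static struct switch_dev headset_switch = {
	.name = "h2w",			/* Android user space watches this switch */
};

/* threaded handler: runs in process context, so the sysfs uevent is safe */
static irqreturn_t headset_detect(int irq, void *dev_id)
{
	int inserted = 1;		/* would normally be read from a GPIO */

	switch_set_state(&headset_switch, inserted);
	return IRQ_HANDLED;
}

static int __init headset_init(void)
{
	int ret = switch_dev_register(&headset_switch);

	if (ret)
		return ret;
	ret = request_threaded_irq(HEADSET_IRQ, NULL, headset_detect,
				   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
				   IRQF_ONESHOT, "headset-detect", NULL);
	if (ret)
		switch_dev_unregister(&headset_switch);
	return ret;
}

static void __exit headset_exit(void)
{
	free_irq(HEADSET_IRQ, NULL);
	switch_dev_unregister(&headset_switch);
}

module_init(headset_init);
module_exit(headset_exit);
MODULE_LICENSE("GPL");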
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNAPTICS_I2C_RMI_H +#define _LINUX_SYNAPTICS_I2C_RMI_H + +#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts" + +enum { + SYNAPTICS_FLIP_X = 1UL << 0, + SYNAPTICS_FLIP_Y = 1UL << 1, + SYNAPTICS_SWAP_XY = 1UL << 2, + SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3, +}; + +struct synaptics_i2c_rmi_platform_data { + uint32_t version; /* Use this entry for panels with */ + /* (major << 8 | minor) version or above. */ + /* If non-zero another array entry follows */ + int (*power)(int on); /* Only valid in first array entry */ + uint32_t flags; + unsigned long irqflags; + uint32_t inactive_left; /* 0x10000 = screen width */ + uint32_t inactive_right; /* 0x10000 = screen width */ + uint32_t inactive_top; /* 0x10000 = screen height */ + uint32_t inactive_bottom; /* 0x10000 = screen height */ + uint32_t snap_left_on; /* 0x10000 = screen width */ + uint32_t snap_left_off; /* 0x10000 = screen width */ + uint32_t snap_right_on; /* 0x10000 = screen width */ + uint32_t snap_right_off; /* 0x10000 = screen width */ + uint32_t snap_top_on; /* 0x10000 = screen height */ + uint32_t snap_top_off; /* 0x10000 = screen height */ + uint32_t snap_bottom_on; /* 0x10000 = screen height */ + uint32_t snap_bottom_off; /* 0x10000 = screen height */ + uint32_t fuzz_x; /* 0x10000 = screen width */ + uint32_t fuzz_y; /* 0x10000 = screen height */ + int fuzz_p; + int fuzz_w; + int8_t sensitivity_adjust; +}; + +#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */ diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h new file mode 100644 index 000000000000..6bd6c4e52d17 --- /dev/null +++ b/include/linux/uid_stat.h @@ -0,0 +1,29 @@ +/* include/linux/uid_stat.h + * + * Copyright (C) 2008-2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __uid_stat_h +#define __uid_stat_h + +/* Contains definitions for resource tracking per uid. */ + +#ifdef CONFIG_UID_STAT +int uid_stat_tcp_snd(uid_t uid, int size); +int uid_stat_tcp_rcv(uid_t uid, int size); +#else +#define uid_stat_tcp_snd(uid, size) do {} while (0); +#define uid_stat_tcp_rcv(uid, size) do {} while (0); +#endif + +#endif /* _LINUX_UID_STAT_H */ diff --git a/include/linux/usb/android_composite.h b/include/linux/usb/android_composite.h new file mode 100644 index 000000000000..ac09dcb71775 --- /dev/null +++ b/include/linux/usb/android_composite.h @@ -0,0 +1,96 @@ +/* + * Platform data for Android USB + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __LINUX_USB_ANDROID_H +#define __LINUX_USB_ANDROID_H + +#include <linux/usb/composite.h> +#include <linux/if_ether.h> + +struct android_usb_function { + struct list_head list; + char *name; + int (*bind_config)(struct usb_configuration *c); +}; + +struct android_usb_product { + /* Default product ID. */ + __u16 product_id; + + /* List of function names associated with this product. + * This is used to compute the USB product ID dynamically + * based on which functions are enabled. + */ + int num_functions; + char **functions; +}; + +struct android_usb_platform_data { + /* USB device descriptor fields */ + __u16 vendor_id; + + /* Default product ID. */ + __u16 product_id; + + __u16 version; + + char *product_name; + char *manufacturer_name; + char *serial_number; + + /* List of available USB products. + * This is used to compute the USB product ID dynamically + * based on which functions are enabled. + * if num_products is zero or no match can be found, + * we use the default product ID + */ + int num_products; + struct android_usb_product *products; + + /* List of all supported USB functions. + * This list is used to define the order in which + * the functions appear in the configuration's list of USB interfaces. + * This is necessary to avoid depending upon the order in which + * the individual function drivers are initialized. + */ + int num_functions; + char **functions; +}; + +/* Platform data for "usb_mass_storage" driver. */ +struct usb_mass_storage_platform_data { + /* Contains values for the SC_INQUIRY SCSI command. */ + char *vendor; + char *product; + int release; + + /* number of LUNS */ + int nluns; +}; + +/* Platform data for USB ethernet driver. */ +struct usb_ether_platform_data { + u8 ethaddr[ETH_ALEN]; + u32 vendorID; + const char *vendorDescr; +}; + +extern void android_register_function(struct android_usb_function *f); + +extern void android_enable_function(struct usb_function *f, int enable); + + +#endif /* __LINUX_USB_ANDROID_H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 617068134ae8..f491e48b21e6 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -36,8 +36,10 @@ #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> +#include <linux/switch.h> +struct usb_composite_dev; struct usb_configuration; /** @@ -101,6 +103,9 @@ struct usb_function { struct usb_configuration *config; + /* disabled is zero if the function is enabled */ + int disabled; + /* REVISIT: bind() functions can be marked __init, which * makes trouble for section mismatch analysis. See if * we can't restructure things to avoid mismatching. 
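Before the remaining composite.h hunks, an illustrative board-file fragment, not part of the patch, showing how the android_usb_platform_data declared in android_composite.h above is plausibly filled in. The product IDs are placeholders; the "android_usb" device name and the "usb_mass_storage"/"adb" function names follow the Android gadget driver's conventions but are assumptions here.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/usb/android_composite.h>

static char *usb_functions_ums[]     = { "usb_mass_storage" };
static char *usb_functions_ums_adb[] = { "usb_mass_storage", "adb" };

static struct android_usb_product usb_products[] = {
	{
		.product_id	= 0x0001,	/* placeholder: mass storage only */
		.num_functions	= ARRAY_SIZE(usb_functions_ums),
		.functions	= usb_functions_ums,
	},
	{
		.product_id	= 0x0002,	/* placeholder: mass storage + adb */
		.num_functions	= ARRAY_SIZE(usb_functions_ums_adb),
		.functions	= usb_functions_ums_adb,
	},
};

static struct android_usb_platform_data android_usb_pdata = {
	.vendor_id		= 0x18d1,	/* Google's USB vendor ID */
	.product_id		= 0x0001,	/* used when no product entry matches */
	.version		= 0x0100,
	.product_name		= "Example Device",
	.manufacturer_name	= "Example Vendor",
	.serial_number		= "0123456789ABCDEF",
	.num_products		= ARRAY_SIZE(usb_products),
	.products		= usb_products,
	.num_functions		= ARRAY_SIZE(usb_functions_ums_adb),
	.functions		= usb_functions_ums_adb,
};

static struct platform_device android_usb_device = {
	.name	= "android_usb",
	.id	= -1,
	.dev	= { .platform_data = &android_usb_pdata },
};

/* the board's init code would then call platform_device_register(&android_usb_device) */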
@@ -128,6 +133,7 @@ struct usb_function { /* internals */ struct list_head list; DECLARE_BITMAP(endpoints, 32); + struct device *dev; }; int usb_add_function(struct usb_configuration *, struct usb_function *); @@ -137,6 +143,9 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); +void usb_function_set_enabled(struct usb_function *, int); +void usb_composite_force_reset(struct usb_composite_dev *); + /** * ep_choose - select descriptor endpoint at current device speed * @g: gadget, connected and running at some speed @@ -269,6 +278,9 @@ struct usb_composite_driver { const struct usb_device_descriptor *dev; struct usb_gadget_strings **strings; + struct class *class; + atomic_t function_count; + /* REVISIT: bind() functions can be marked __init, which * makes trouble for section mismatch analysis. See if * we can't restructure things to avoid mismatching... @@ -282,6 +294,8 @@ struct usb_composite_driver { /* global suspend hooks */ void (*suspend)(struct usb_composite_dev *); void (*resume)(struct usb_composite_dev *); + + void (*enable_function)(struct usb_function *f, int enable); }; extern int usb_composite_register(struct usb_composite_driver *); @@ -342,6 +356,11 @@ struct usb_composite_dev { /* protects at least deactivation count */ spinlock_t lock; + + struct switch_dev sdev; + /* used by usb_composite_force_reset to avoid signalling switch changes */ + bool mute_switch; + struct work_struct switch_work; }; extern int usb_string_id(struct usb_composite_dev *c); diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h new file mode 100644 index 000000000000..426c6b5cc06a --- /dev/null +++ b/include/linux/usb/f_mtp.h @@ -0,0 +1,53 @@ +/* + * Gadget Function Driver for MTP + * + * Copyright (C) 2010 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_F_MTP_H +#define __LINUX_USB_F_MTP_H + +/* Constants for MTP_SET_INTERFACE_MODE */ +#define MTP_INTERFACE_MODE_MTP 0 +#define MTP_INTERFACE_MODE_PTP 1 + + +struct mtp_file_range { + /* file descriptor for file to transfer */ + int fd; + /* offset in file for start of transfer */ + loff_t offset; + /* number of bytes to transfer */ + size_t length; +}; + +struct mtp_event { + /* size of the event */ + size_t length; + /* event data to send */ + void *data; +}; + +/* Sends the specified file range to the host */ +#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range) +/* Receives data from the host and writes it to a file. + * The file is created if it does not exist. 
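As a user-space aside, not part of the patch: the MTP stack is expected to drive these ioctls through the gadget's character device, streaming whole files with MTP_SEND_FILE above and MTP_RECEIVE_FILE below. A sketch, with the /dev/mtp_usb node name taken as an assumption:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/usb/f_mtp.h>

/* Send all of 'path' to the host; mtp_fd is the opened gadget node,
 * e.g. open("/dev/mtp_usb", O_RDWR) (node name assumed). */
static int mtp_send_whole_file(int mtp_fd, const char *path)
{
	struct mtp_file_range mfr;
	struct stat st;
	int ret, fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return -1;
	}
	mfr.fd = fd;			/* kernel reads the data from this descriptor */
	mfr.offset = 0;
	mfr.length = st.st_size;
	ret = ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
	close(fd);
	return ret;
}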
+ */ +#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range) +/* Sets the driver mode to either MTP or PTP */ +#define MTP_SET_INTERFACE_MODE _IOW('M', 2, int) +/* Sends an event to the host via the interrupt endpoint */ +#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event) + +#endif /* __LINUX_USB_F_MTP_H */ diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h new file mode 100755 index 000000000000..a096d24ada1d --- /dev/null +++ b/include/linux/wakelock.h @@ -0,0 +1,91 @@ +/* include/linux/wakelock.h + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_WAKELOCK_H +#define _LINUX_WAKELOCK_H + +#include <linux/list.h> +#include <linux/ktime.h> + +/* A wake_lock prevents the system from entering suspend or other low power + * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock + * prevents a full system suspend. If the type is WAKE_LOCK_IDLE, low power + * states that cause large interrupt latencies or that disable a set of + * interrupts will not entered from idle until the wake_locks are released. + */ + +enum { + WAKE_LOCK_SUSPEND, /* Prevent suspend */ + WAKE_LOCK_IDLE, /* Prevent low power idle */ + WAKE_LOCK_TYPE_COUNT +}; + +struct wake_lock { +#ifdef CONFIG_HAS_WAKELOCK + struct list_head link; + int flags; + const char *name; + unsigned long expires; +#ifdef CONFIG_WAKELOCK_STAT + struct { + int count; + int expire_count; + int wakeup_count; + ktime_t total_time; + ktime_t prevent_suspend_time; + ktime_t max_time; + ktime_t last_time; + } stat; +#endif +#endif +}; + +#ifdef CONFIG_HAS_WAKELOCK + +void wake_lock_init(struct wake_lock *lock, int type, const char *name); +void wake_lock_destroy(struct wake_lock *lock); +void wake_lock(struct wake_lock *lock); +void wake_lock_timeout(struct wake_lock *lock, long timeout); +void wake_unlock(struct wake_lock *lock); + +/* wake_lock_active returns a non-zero value if the wake_lock is currently + * locked. If the wake_lock has a timeout, it does not check the timeout + * but if the timeout had aready been checked it will return 0. + */ +int wake_lock_active(struct wake_lock *lock); + +/* has_wake_lock returns 0 if no wake locks of the specified type are active, + * and non-zero if one or more wake locks are held. Specifically it returns + * -1 if one or more wake locks with no timeout are active or the + * number of jiffies until all active wake locks time out. 
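To illustrate the API declared above (a sketch, not from the patch): a driver that must finish deferred work before the system is allowed back into suspend holds a WAKE_LOCK_SUSPEND lock across the hand-off. The event source and the work body are assumed.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/wakelock.h>
#include <linux/workqueue.h>

static struct wake_lock mydrv_wake_lock;
static struct work_struct mydrv_work;

static void mydrv_work_fn(struct work_struct *work)
{
	/* hypothetical: drain a FIFO, push events to user space, ... */
	wake_unlock(&mydrv_wake_lock);	/* done: suspend may proceed again */
}

/* would normally be called from the driver's interrupt handler (wiring not shown) */
static void mydrv_event(void)
{
	wake_lock(&mydrv_wake_lock);	/* keep the system awake until the work runs */
	schedule_work(&mydrv_work);
}

static int __init mydrv_init(void)
{
	wake_lock_init(&mydrv_wake_lock, WAKE_LOCK_SUSPEND, "mydrv");
	INIT_WORK(&mydrv_work, mydrv_work_fn);
	mydrv_event();			/* demonstrate the flow once at load time */
	return 0;
}

static void __exit mydrv_exit(void)
{
	wake_lock_destroy(&mydrv_wake_lock);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");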
+ */ +long has_wake_lock(int type); + +#else + +static inline void wake_lock_init(struct wake_lock *lock, int type, + const char *name) {} +static inline void wake_lock_destroy(struct wake_lock *lock) {} +static inline void wake_lock(struct wake_lock *lock) {} +static inline void wake_lock_timeout(struct wake_lock *lock, long timeout) {} +static inline void wake_unlock(struct wake_lock *lock) {} + +static inline int wake_lock_active(struct wake_lock *lock) { return 0; } +static inline long has_wake_lock(int type) { return 0; } + +#endif + +#endif + diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h new file mode 100644 index 000000000000..f07e0679fb82 --- /dev/null +++ b/include/linux/wifi_tiwlan.h @@ -0,0 +1,27 @@ +/* include/linux/wifi_tiwlan.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _LINUX_WIFI_TIWLAN_H_ +#define _LINUX_WIFI_TIWLAN_H_ + +#include <linux/wlan_plat.h> + +#define WMPA_NUMBER_OF_SECTIONS 3 +#define WMPA_NUMBER_OF_BUFFERS 160 +#define WMPA_SECTION_HEADER 24 +#define WMPA_SECTION_SIZE_0 (WMPA_NUMBER_OF_BUFFERS * 64) +#define WMPA_SECTION_SIZE_1 (WMPA_NUMBER_OF_BUFFERS * 256) +#define WMPA_SECTION_SIZE_2 (WMPA_NUMBER_OF_BUFFERS * 2048) + +#endif diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h new file mode 100644 index 000000000000..9057ec63d5d3 --- /dev/null +++ b/include/linux/wl127x-rfkill.h @@ -0,0 +1,35 @@ +/* + * Bluetooth TI wl127x rfkill power control via GPIO + * + * Copyright (C) 2009 Motorola, Inc. + * Copyright (C) 2008 Texas Instruments + * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _LINUX_WL127X_RFKILL_H +#define _LINUX_WL127X_RFKILL_H + +#include <linux/rfkill.h> + +struct wl127x_rfkill_platform_data { + int nshutdown_gpio; + + struct rfkill *rfkill; /* for driver only */ +}; + +#endif diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h new file mode 100644 index 000000000000..3b1e2e054fd5 --- /dev/null +++ b/include/linux/wlan_plat.h @@ -0,0 +1,26 @@ +/* include/linux/wlan_plat.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
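For the wifi_platform_data structure completed at the end of this header (just below), an illustrative board-file fragment, not part of the patch. The GPIO number, the device name, and the empty card-detect hook are placeholders; a real board would also wire up mem_prealloc and get_mac_addr as needed.

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/wlan_plat.h>

#define WLAN_PWR_GPIO	27		/* hypothetical power-enable GPIO */

static int example_wifi_set_power(int on)
{
	gpio_set_value(WLAN_PWR_GPIO, on);
	return 0;
}

static int example_wifi_set_carddetect(int present)
{
	/* hypothetical: ask the SDIO host controller to rescan its slot */
	return 0;
}

static struct wifi_platform_data example_wifi_pdata = {
	.set_power	= example_wifi_set_power,
	.set_carddetect	= example_wifi_set_carddetect,
};

static struct platform_device example_wifi_device = {
	.name	= "example_wlan",	/* must match the WLAN driver's platform driver name */
	.id	= -1,
	.dev	= { .platform_data = &example_wifi_pdata },
};

/* registered from the board init code with platform_device_register() */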
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _LINUX_WLAN_PLAT_H_ +#define _LINUX_WLAN_PLAT_H_ + +struct wifi_platform_data { + int (*set_power)(int val); + int (*set_reset)(int val); + int (*set_carddetect)(int val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); +}; + +#endif diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h new file mode 100644 index 000000000000..10e4c1506eeb --- /dev/null +++ b/include/net/activity_stats.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Mike Chan (mike@android.com) + */ + +#ifndef __activity_stats_h +#define __activity_stats_h + +#ifdef CONFIG_NET_ACTIVITY_STATS +void activity_stats_update(void); +#else +#define activity_stats_update(void) {} +#endif + +#endif /* _NET_ACTIVITY_STATS_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index bcbdd6d4e6dd..90076fbe3446 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -146,15 +146,20 @@ enum { #define ESCO_2EV5 0x0100 #define ESCO_3EV5 0x0200 -#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) -#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) +#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) +#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) +#define ALL_ESCO_MASK (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \ + EDR_ESCO_MASK) /* ACL flags */ +#define ACL_START_NO_FLUSH 0x00 #define ACL_CONT 0x01 #define ACL_START 0x02 #define ACL_ACTIVE_BCAST 0x04 #define ACL_PICO_BCAST 0x08 +#define ACL_PB_MASK (ACL_CONT | ACL_START) + /* Baseband links */ #define SCO_LINK 0x00 #define ACL_LINK 0x01 @@ -193,6 +198,7 @@ enum { #define LMP_EDR_ESCO_3M 0x40 #define LMP_EDR_3S_ESCO 0x80 +#define LMP_NO_FLUSH 0x01 #define LMP_SIMPLE_PAIR 0x08 /* Connection modes */ @@ -998,6 +1004,9 @@ struct hci_conn_info { __u8 out; __u16 state; __u32 link_mode; + __u32 mtu; + __u32 cnt; + __u32 pkts; }; struct hci_dev_req { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4568b938ca35..1ce134720cfe 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -339,12 +339,15 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle); void hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); -struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); +struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, + __u16 pkt_type, bdaddr_t *dst); int hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); +struct hci_conn 
*hci_connect(struct hci_dev *hdev, int type, + __u16 pkt_type, bdaddr_t *dst, + __u8 sec_level, __u8 auth_type); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); int hci_conn_change_link_key(struct hci_conn *conn); @@ -371,7 +374,7 @@ static inline void hci_conn_put(struct hci_conn *conn) if (conn->state == BT_CONNECTED) { timeo = msecs_to_jiffies(conn->disc_timeout); if (!conn->out) - timeo *= 2; + timeo *= 20; } else timeo = msecs_to_jiffies(10); } else @@ -456,6 +459,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) #define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO) #define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR) +#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH) /* ----- HCI protocols ----- */ struct hci_proto { diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 6c241444f902..8f8e648032e8 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -75,6 +75,7 @@ struct l2cap_conninfo { #define L2CAP_LM_TRUSTED 0x0008 #define L2CAP_LM_RELIABLE 0x0010 #define L2CAP_LM_SECURE 0x0020 +#define L2CAP_LM_FLUSHABLE 0x0040 /* L2CAP command codes */ #define L2CAP_COMMAND_REJ 0x01 @@ -327,6 +328,7 @@ struct l2cap_pinfo { __u8 sec_level; __u8 role_switch; __u8 force_reliable; + __u8 flushable; __u8 conf_req[64]; __u8 conf_len; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index a140847d622c..76937710e516 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -29,7 +29,6 @@ #define RFCOMM_CONN_TIMEOUT (HZ * 30) #define RFCOMM_DISC_TIMEOUT (HZ * 20) #define RFCOMM_AUTH_TIMEOUT (HZ * 25) -#define RFCOMM_IDLE_TIMEOUT (HZ * 2) #define RFCOMM_DEFAULT_MTU 127 #define RFCOMM_DEFAULT_CREDITS 7 @@ -155,7 +154,6 @@ struct rfcomm_msc { struct rfcomm_session { struct list_head list; struct socket *sock; - struct timer_list timer; unsigned long state; unsigned long flags; atomic_t refcnt; diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index e28a2a771471..924338ac3223 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -37,6 +37,7 @@ struct sockaddr_sco { sa_family_t sco_family; bdaddr_t sco_bdaddr; + __u16 sco_pkt_type; }; /* SCO socket options */ @@ -72,7 +73,8 @@ struct sco_conn { struct sco_pinfo { struct bt_sock bt; - __u32 flags; + __u16 pkt_type; + struct sco_conn *conn; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 3e4b33e36602..22c912c5dde6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1403,6 +1403,8 @@ extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, extern int tcp_gro_complete(struct sk_buff *skb); extern int tcp4_gro_complete(struct sk_buff *skb); +extern void tcp_v4_nuke_addr(__u32 saddr); + #ifdef CONFIG_PROC_FS extern int tcp4_proc_init(void); extern void tcp4_proc_exit(void); diff --git a/init/Kconfig b/init/Kconfig index 2de5b1cbadd9..724e9b809318 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -790,6 +790,12 @@ config SYSCTL config ANON_INODES bool +config PANIC_TIMEOUT + int "Default panic timeout" + default 0 + help + Set default panic timeout. + menuconfig EMBEDDED bool "Configure standard kernel features (for small systems)" help @@ -962,6 +968,15 @@ config SHMEM option replaces shmem and tmpfs with the much simpler ramfs code, which may be appropriate on small systems without swap. 
+config ASHMEM + bool "Enable the Anonymous Shared Memory Subsystem" + default n + depends on SHMEM || TINY_SHMEM + help + The ashmem subsystem is a new shared memory allocator, similar to + POSIX SHM but with different behavior and sporting a simpler + file-based API. + config AIO bool "Enable AIO support" if EMBEDDED default y diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c9483d8f6140..b90316504417 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -58,6 +58,7 @@ #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ #include <linux/eventfd.h> #include <linux/poll.h> +#include <linux/capability.h> #include <asm/atomic.h> @@ -1723,6 +1724,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) failed_ss = ss; goto out; } + } else if (!capable(CAP_SYS_ADMIN)) { + const struct cred *cred = current_cred(), *tcred; + + /* No can_attach() - check perms generically */ + tcred = __task_cred(tsk); + if (cred->euid != tcred->uid && + cred->euid != tcred->suid) { + return -EACCES; + } } } @@ -1821,7 +1831,6 @@ EXPORT_SYMBOL_GPL(cgroup_attach_task_all); static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) { struct task_struct *tsk; - const struct cred *cred = current_cred(), *tcred; int ret; if (pid) { @@ -1831,14 +1840,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) rcu_read_unlock(); return -ESRCH; } - - tcred = __task_cred(tsk); - if (cred->euid && - cred->euid != tcred->uid && - cred->euid != tcred->suid) { - rcu_read_unlock(); - return -EACCES; - } get_task_struct(tsk); rcu_read_unlock(); } else { diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index ce71ed53e88f..8e4c614d9553 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -172,6 +172,14 @@ static int freezer_can_attach(struct cgroup_subsys *ss, { struct freezer *freezer; + if ((current != task) && (!capable(CAP_SYS_ADMIN))) { + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(task); + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + /* * Anything frozen can't move or be moved to/from. 
* diff --git a/kernel/cpuset.c b/kernel/cpuset.c index b23c0979bbe7..328f1e7ed782 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1383,6 +1383,13 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, int ret; struct cpuset *cs = cgroup_cs(cont); + if ((current != task) && (!capable(CAP_SYS_ADMIN))) { + const struct cred *cred = current_cred(), *tcred; + + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) return -ENOSPC; diff --git a/kernel/fork.c b/kernel/fork.c index c445f8cc408d..f3d93ab730c6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -147,6 +147,9 @@ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; +/* Notifier list called when a task struct is freed */ +static ATOMIC_NOTIFIER_HEAD(task_free_notifier); + static void account_kernel_stack(struct thread_info *ti, int account) { struct zone *zone = page_zone(virt_to_page(ti)); @@ -177,6 +180,18 @@ static inline void put_signal_struct(struct signal_struct *sig) free_signal_struct(sig); } +int task_free_register(struct notifier_block *n) +{ + return atomic_notifier_chain_register(&task_free_notifier, n); +} +EXPORT_SYMBOL(task_free_register); + +int task_free_unregister(struct notifier_block *n) +{ + return atomic_notifier_chain_unregister(&task_free_notifier, n); +} +EXPORT_SYMBOL(task_free_unregister); + void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); @@ -187,6 +202,7 @@ void __put_task_struct(struct task_struct *tsk) delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); + atomic_notifier_call_chain(&task_free_notifier, 0, tsk); if (!profile_handoff_task(tsk)) free_task(tsk); } diff --git a/kernel/futex.c b/kernel/futex.c index 6a3a5fa1526d..257db2460ab8 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -220,6 +220,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) struct mm_struct *mm = current->mm; struct page *page; int err; + struct vm_area_struct *vma; /* * The futex address must be "naturally" aligned. @@ -245,6 +246,37 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) return 0; } + /* + * The futex is hashed differently depending on whether + * it's in a shared or private mapping. So check vma first. + */ + vma = find_extend_vma(mm, address); + if (unlikely(!vma)) + return -EFAULT; + + /* + * Permissions. + */ + if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) + return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; + + /* + * Private mappings are handled in a simple way. + * + * NOTE: When userspace waits on a MAP_SHARED mapping, even if + * it's a read-only handle, it's expected that futexes attach to + * the object not the particular process. Therefore we use + * VM_MAYSHARE here, not VM_SHARED which is restricted to shared + * mappings of _writable_ handles. 
+ */ + if (likely(!(vma->vm_flags & VM_MAYSHARE))) { + key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ + key->private.mm = mm; + key->private.address = address; + get_futex_key_refs(key); + return 0; + } + again: err = get_user_pages_fast(address, 1, 1, &page); if (err < 0) diff --git a/kernel/panic.c b/kernel/panic.c index 4c13b1a88ebb..c4c6bff9ff63 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -33,7 +33,10 @@ static int pause_on_oops; static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -int panic_timeout; +#ifndef CONFIG_PANIC_TIMEOUT +#define CONFIG_PANIC_TIMEOUT 0 +#endif +int panic_timeout = CONFIG_PANIC_TIMEOUT; ATOMIC_NOTIFIER_HEAD(panic_notifier_list); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index ca6066a6952e..33be5707b882 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -134,6 +134,73 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. +config HAS_WAKELOCK + bool + +config HAS_EARLYSUSPEND + bool + +config WAKELOCK + bool "Wake lock" + depends on PM && RTC_CLASS + default n + select HAS_WAKELOCK + ---help--- + Enable wakelocks. When user space request a sleep state the + sleep request will be delayed until no wake locks are held. + +config WAKELOCK_STAT + bool "Wake lock stats" + depends on WAKELOCK + default y + ---help--- + Report wake lock stats in /proc/wakelocks + +config USER_WAKELOCK + bool "Userspace wake locks" + depends on WAKELOCK + default y + ---help--- + User-space wake lock api. Write "lockname" or "lockname timeout" + to /sys/power/wake_lock lock and if needed create a wake lock. + Write "lockname" to /sys/power/wake_unlock to unlock a user wake + lock. + +config EARLYSUSPEND + bool "Early suspend" + depends on WAKELOCK + default y + select HAS_EARLYSUSPEND + ---help--- + Call early suspend handlers when the user requested sleep state + changes. + +choice + prompt "User-space screen access" + default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE + default CONSOLE_EARLYSUSPEND + depends on HAS_EARLYSUSPEND + + config NO_USER_SPACE_SCREEN_ACCESS_CONTROL + bool "None" + + config CONSOLE_EARLYSUSPEND + bool "Console switch on early-suspend" + depends on HAS_EARLYSUSPEND && VT + ---help--- + Register early suspend handler to perform a console switch to + when user-space should stop drawing to the screen and a switch + back when it should resume. + + config FB_EARLYSUSPEND + bool "Sysfs interface" + depends on HAS_EARLYSUSPEND + ---help--- + Register early suspend handler that notifies and waits for + user-space through sysfs when user-space should stop drawing + to the screen and notifies user-space when it should resume. 
+endchoice + config HIBERNATION bool "Hibernation (aka 'suspend to disk')" depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE diff --git a/kernel/power/Makefile b/kernel/power/Makefile index f9063c6b185d..b9c82465499b 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -11,5 +11,10 @@ obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o obj-$(CONFIG_SUSPEND_NVS) += nvs.o +obj-$(CONFIG_WAKELOCK) += wakelock.o +obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o +obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o +obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o +obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c new file mode 100644 index 000000000000..a3edcb267389 --- /dev/null +++ b/kernel/power/consoleearlysuspend.c @@ -0,0 +1,78 @@ +/* kernel/power/consoleearlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/console.h> +#include <linux/earlysuspend.h> +#include <linux/kbd_kern.h> +#include <linux/module.h> +#include <linux/vt_kern.h> +#include <linux/wait.h> + +#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) + +static int orig_fgconsole; +static void console_early_suspend(struct early_suspend *h) +{ + acquire_console_sem(); + orig_fgconsole = fg_console; + if (vc_allocate(EARLY_SUSPEND_CONSOLE)) + goto err; + if (set_console(EARLY_SUSPEND_CONSOLE)) + goto err; + release_console_sem(); + + if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1)) + pr_warning("console_early_suspend: Can't switch VCs.\n"); + return; +err: + pr_warning("console_early_suspend: Can't set console\n"); + release_console_sem(); +} + +static void console_late_resume(struct early_suspend *h) +{ + int ret; + acquire_console_sem(); + ret = set_console(orig_fgconsole); + release_console_sem(); + if (ret) { + pr_warning("console_late_resume: Can't set console.\n"); + return; + } + + if (vt_waitactive(orig_fgconsole + 1)) + pr_warning("console_late_resume: Can't switch VCs.\n"); +} + +static struct early_suspend console_early_suspend_desc = { + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, + .suspend = console_early_suspend, + .resume = console_late_resume, +}; + +static int __init console_early_suspend_init(void) +{ + register_early_suspend(&console_early_suspend_desc); + return 0; +} + +static void __exit console_early_suspend_exit(void) +{ + unregister_early_suspend(&console_early_suspend_desc); +} + +module_init(console_early_suspend_init); +module_exit(console_early_suspend_exit); + diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c new file mode 100644 index 000000000000..84bed51dcdce --- /dev/null +++ b/kernel/power/earlysuspend.c @@ -0,0 +1,178 @@ +/* kernel/power/earlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/earlysuspend.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rtc.h> +#include <linux/syscalls.h> /* sys_sync */ +#include <linux/wakelock.h> +#include <linux/workqueue.h> + +#include "power.h" + +enum { + DEBUG_USER_STATE = 1U << 0, + DEBUG_SUSPEND = 1U << 2, +}; +static int debug_mask = DEBUG_USER_STATE; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static DEFINE_MUTEX(early_suspend_lock); +static LIST_HEAD(early_suspend_handlers); +static void early_suspend(struct work_struct *work); +static void late_resume(struct work_struct *work); +static DECLARE_WORK(early_suspend_work, early_suspend); +static DECLARE_WORK(late_resume_work, late_resume); +static DEFINE_SPINLOCK(state_lock); +enum { + SUSPEND_REQUESTED = 0x1, + SUSPENDED = 0x2, + SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED, +}; +static int state; + +void register_early_suspend(struct early_suspend *handler) +{ + struct list_head *pos; + + mutex_lock(&early_suspend_lock); + list_for_each(pos, &early_suspend_handlers) { + struct early_suspend *e; + e = list_entry(pos, struct early_suspend, link); + if (e->level > handler->level) + break; + } + list_add_tail(&handler->link, pos); + if ((state & SUSPENDED) && handler->suspend) + handler->suspend(handler); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(register_early_suspend); + +void unregister_early_suspend(struct early_suspend *handler) +{ + mutex_lock(&early_suspend_lock); + list_del(&handler->link); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(unregister_early_suspend); + +static void early_suspend(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPEND_REQUESTED) + state |= SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: abort, state %d\n", state); + mutex_unlock(&early_suspend_lock); + goto abort; + } + + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: call handlers\n"); + list_for_each_entry(pos, &early_suspend_handlers, link) { + if (pos->suspend != NULL) + pos->suspend(pos); + } + mutex_unlock(&early_suspend_lock); + + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: sync\n"); + + sys_sync(); +abort: + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPEND_REQUESTED_AND_SUSPENDED) + wake_unlock(&main_wake_lock); + spin_unlock_irqrestore(&state_lock, irqflags); +} + +static void late_resume(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPENDED) + state &= ~SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: abort, state %d\n", state); + goto abort; + } + if 
(debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: call handlers\n"); + list_for_each_entry_reverse(pos, &early_suspend_handlers, link) + if (pos->resume != NULL) + pos->resume(pos); + if (debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: done\n"); +abort: + mutex_unlock(&early_suspend_lock); +} + +void request_suspend_state(suspend_state_t new_state) +{ + unsigned long irqflags; + int old_sleep; + + spin_lock_irqsave(&state_lock, irqflags); + old_sleep = state & SUSPEND_REQUESTED; + if (debug_mask & DEBUG_USER_STATE) { + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info("request_suspend_state: %s (%d->%d) at %lld " + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + new_state != PM_SUSPEND_ON ? "sleep" : "wakeup", + requested_suspend_state, new_state, + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + } + if (!old_sleep && new_state != PM_SUSPEND_ON) { + state |= SUSPEND_REQUESTED; + queue_work(suspend_work_queue, &early_suspend_work); + } else if (old_sleep && new_state == PM_SUSPEND_ON) { + state &= ~SUSPEND_REQUESTED; + wake_lock(&main_wake_lock); + queue_work(suspend_work_queue, &late_resume_work); + } + requested_suspend_state = new_state; + spin_unlock_irqrestore(&state_lock, irqflags); +} + +suspend_state_t get_suspend_state(void) +{ + return requested_suspend_state; +} diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c new file mode 100644 index 000000000000..15137650149c --- /dev/null +++ b/kernel/power/fbearlysuspend.c @@ -0,0 +1,153 @@ +/* kernel/power/fbearlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
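
The rest of this file implements a sysfs handshake under /sys/power: a read of wait_for_fb_sleep blocks until early suspend asks user space to stop drawing and then returns "sleeping", while a read of wait_for_fb_wake acknowledges the stop and blocks until late resume, returning "awake". A rough sketch of the user-space consumer (not part of this patch, error handling omitted):

/*
 * Assumed user-space consumer of the wait_for_fb_sleep /
 * wait_for_fb_wake files implemented below: pause rendering across
 * early suspend and resume it on late resume.
 */
#include <fcntl.h>
#include <unistd.h>

static void wait_on(const char *path)
{
	char buf[16];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return;
	/* the read blocks inside the kernel until the fb state changes */
	read(fd, buf, sizeof(buf));
	close(fd);
}

int main(void)
{
	for (;;) {
		/* returns when early suspend wants drawing stopped ("sleeping") */
		wait_on("/sys/power/wait_for_fb_sleep");
		/* ... stop submitting frames ... */

		/* acknowledges the stop, returns on late resume ("awake") */
		wait_on("/sys/power/wait_for_fb_wake");
		/* ... resume submitting frames ... */
	}
	return 0;
}
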
+ * + */ + +#include <linux/earlysuspend.h> +#include <linux/module.h> +#include <linux/wait.h> + +#include "power.h" + +static wait_queue_head_t fb_state_wq; +static DEFINE_SPINLOCK(fb_state_lock); +static enum { + FB_STATE_STOPPED_DRAWING, + FB_STATE_REQUEST_STOP_DRAWING, + FB_STATE_DRAWING_OK, +} fb_state; + +/* tell userspace to stop drawing, wait for it to stop */ +static void stop_drawing_early_suspend(struct early_suspend *h) +{ + int ret; + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + fb_state = FB_STATE_REQUEST_STOP_DRAWING; + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + + wake_up_all(&fb_state_wq); + ret = wait_event_timeout(fb_state_wq, + fb_state == FB_STATE_STOPPED_DRAWING, + HZ); + if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING)) + pr_warning("stop_drawing_early_suspend: timeout waiting for " + "userspace to stop drawing\n"); +} + +/* tell userspace to start drawing */ +static void start_drawing_late_resume(struct early_suspend *h) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + fb_state = FB_STATE_DRAWING_OK; + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + wake_up(&fb_state_wq); +} + +static struct early_suspend stop_drawing_early_suspend_desc = { + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, + .suspend = stop_drawing_early_suspend, + .resume = start_drawing_late_resume, +}; + +static ssize_t wait_for_fb_sleep_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int ret; + + ret = wait_event_interruptible(fb_state_wq, + fb_state != FB_STATE_DRAWING_OK); + if (ret && fb_state == FB_STATE_DRAWING_OK) + return ret; + else + s += sprintf(buf, "sleeping"); + return s - buf; +} + +static ssize_t wait_for_fb_wake_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int ret; + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) { + fb_state = FB_STATE_STOPPED_DRAWING; + wake_up(&fb_state_wq); + } + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + + ret = wait_event_interruptible(fb_state_wq, + fb_state == FB_STATE_DRAWING_OK); + if (ret && fb_state != FB_STATE_DRAWING_OK) + return ret; + else + s += sprintf(buf, "awake"); + + return s - buf; +} + +#define power_ro_attr(_name) \ +static struct kobj_attribute _name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0444, \ + }, \ + .show = _name##_show, \ + .store = NULL, \ +} + +power_ro_attr(wait_for_fb_sleep); +power_ro_attr(wait_for_fb_wake); + +static struct attribute *g[] = { + &wait_for_fb_sleep_attr.attr, + &wait_for_fb_wake_attr.attr, + NULL, +}; + +static struct attribute_group attr_group = { + .attrs = g, +}; + +static int __init android_power_init(void) +{ + int ret; + + init_waitqueue_head(&fb_state_wq); + fb_state = FB_STATE_DRAWING_OK; + + ret = sysfs_create_group(power_kobj, &attr_group); + if (ret) { + pr_err("android_power_init: sysfs_create_group failed\n"); + return ret; + } + + register_early_suspend(&stop_drawing_early_suspend_desc); + return 0; +} + +static void __exit android_power_exit(void) +{ + unregister_early_suspend(&stop_drawing_early_suspend_desc); + sysfs_remove_group(power_kobj, &attr_group); +} + +module_init(android_power_init); +module_exit(android_power_exit); + diff --git a/kernel/power/main.c b/kernel/power/main.c index 62b0bc6e4983..7c2c6ed53000 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -173,7 +173,11 @@ static ssize_t 
state_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { #ifdef CONFIG_SUSPEND +#ifdef CONFIG_EARLYSUSPEND + suspend_state_t state = PM_SUSPEND_ON; +#else suspend_state_t state = PM_SUSPEND_STANDBY; +#endif const char * const *s; #endif char *p; @@ -195,8 +199,15 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, break; } if (state < PM_SUSPEND_MAX && *s) +#ifdef CONFIG_EARLYSUSPEND + if (state == PM_SUSPEND_ON || valid_state(state)) { + error = 0; + request_suspend_state(state); + } +#else error = enter_state(state); #endif +#endif Exit: return error ? error : n; @@ -283,6 +294,11 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, power_attr(pm_trace); #endif /* CONFIG_PM_TRACE */ +#ifdef CONFIG_USER_WAKELOCK +power_attr(wake_lock); +power_attr(wake_unlock); +#endif + static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE @@ -294,6 +310,10 @@ static struct attribute * g[] = { #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif +#ifdef CONFIG_USER_WAKELOCK + &wake_lock_attr.attr, + &wake_unlock_attr.attr, +#endif #endif NULL, }; diff --git a/kernel/power/power.h b/kernel/power/power.h index 006270fe382d..02fc4a66e5c9 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -233,3 +233,27 @@ static inline void suspend_thaw_processes(void) { } #endif + +#ifdef CONFIG_WAKELOCK +/* kernel/power/wakelock.c */ +extern struct workqueue_struct *suspend_work_queue; +extern struct wake_lock main_wake_lock; +extern suspend_state_t requested_suspend_state; +#endif + +#ifdef CONFIG_USER_WAKELOCK +ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); +ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n); +ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); +ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n); +#endif + +#ifdef CONFIG_EARLYSUSPEND +/* kernel/power/earlysuspend.c */ +void request_suspend_state(suspend_state_t state); +suspend_state_t get_suspend_state(void); +#endif diff --git a/kernel/power/process.c b/kernel/power/process.c index 028a99598f49..cdac5f955b85 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -16,6 +16,7 @@ #include <linux/freezer.h> #include <linux/delay.h> #include <linux/workqueue.h> +#include <linux/wakelock.h> /* * Timeout for stopping processes @@ -40,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only) struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; + unsigned int wakeup = 0; do_gettimeofday(&start); @@ -75,6 +77,10 @@ static int try_to_freeze_tasks(bool sig_only) todo += wq_busy; } + if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) { + wakeup = 1; + break; + } if (!todo || time_after(jiffies, end_time)) break; @@ -96,18 +102,25 @@ static int try_to_freeze_tasks(bool sig_only) * and caller must call thaw_processes() if something fails), * but it cleans up leftover PF_FREEZE requests. */ - printk("\n"); - printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " - "(%d tasks refusing to freeze, wq_busy=%d):\n", - elapsed_csecs / 100, elapsed_csecs % 100, - todo - wq_busy, wq_busy); - + if(wakeup) { + printk("\n"); + printk(KERN_ERR "Freezing of %s aborted\n", + sig_only ? 
"user space " : "tasks "); + } + else { + printk("\n"); + printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " + "(%d tasks refusing to freeze, wq_busy=%d):\n", + elapsed_csecs / 100, elapsed_csecs % 100, + todo - wq_busy, wq_busy); + } thaw_workqueues(); read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); - if (freezing(p) && !freezer_should_skip(p)) + if (freezing(p) && !freezer_should_skip(p) && + elapsed_csecs > 100) sched_show_task(p); cancel_freezing(p); task_unlock(p); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 7335952ee473..e84f5b26f1dd 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -26,6 +26,9 @@ #include "power.h" const char *const pm_states[PM_SUSPEND_MAX] = { +#ifdef CONFIG_EARLYSUSPEND + [PM_SUSPEND_ON] = "on", +#endif [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] = "mem", }; diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c new file mode 100644 index 000000000000..a28a8db41468 --- /dev/null +++ b/kernel/power/userwakelock.c @@ -0,0 +1,219 @@ +/* kernel/power/userwakelock.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/ctype.h> +#include <linux/module.h> +#include <linux/wakelock.h> +#include <linux/slab.h> + +#include "power.h" + +enum { + DEBUG_FAILURE = BIT(0), + DEBUG_ERROR = BIT(1), + DEBUG_NEW = BIT(2), + DEBUG_ACCESS = BIT(3), + DEBUG_LOOKUP = BIT(4), +}; +static int debug_mask = DEBUG_FAILURE; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static DEFINE_MUTEX(tree_lock); + +struct user_wake_lock { + struct rb_node node; + struct wake_lock wake_lock; + char name[0]; +}; +struct rb_root user_wake_locks; + +static struct user_wake_lock *lookup_wake_lock_name( + const char *buf, int allocate, long *timeoutptr) +{ + struct rb_node **p = &user_wake_locks.rb_node; + struct rb_node *parent = NULL; + struct user_wake_lock *l; + int diff; + u64 timeout; + int name_len; + const char *arg; + + /* Find length of lock name and start of optional timeout string */ + arg = buf; + while (*arg && !isspace(*arg)) + arg++; + name_len = arg - buf; + if (!name_len) + goto bad_arg; + while (isspace(*arg)) + arg++; + + /* Process timeout string */ + if (timeoutptr && *arg) { + timeout = simple_strtoull(arg, (char **)&arg, 0); + while (isspace(*arg)) + arg++; + if (*arg) + goto bad_arg; + /* convert timeout from nanoseconds to jiffies > 0 */ + timeout += (NSEC_PER_SEC / HZ) - 1; + do_div(timeout, (NSEC_PER_SEC / HZ)); + if (timeout <= 0) + timeout = 1; + *timeoutptr = timeout; + } else if (*arg) + goto bad_arg; + else if (timeoutptr) + *timeoutptr = 0; + + /* Lookup wake lock in rbtree */ + while (*p) { + parent = *p; + l = rb_entry(parent, struct user_wake_lock, node); + diff = strncmp(buf, l->name, name_len); + if (!diff && l->name[name_len]) + diff = -1; + if (debug_mask & DEBUG_ERROR) + pr_info("lookup_wake_lock_name: compare %.*s %s %d\n", + name_len, buf, l->name, diff); + + if (diff < 0) + p = &(*p)->rb_left; + else if (diff > 0) + p = &(*p)->rb_right; + else + return l; + } 
+ + /* Allocate and add new wakelock to rbtree */ + if (!allocate) { + if (debug_mask & DEBUG_ERROR) + pr_info("lookup_wake_lock_name: %.*s not found\n", + name_len, buf); + return ERR_PTR(-EINVAL); + } + l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL); + if (l == NULL) { + if (debug_mask & DEBUG_FAILURE) + pr_err("lookup_wake_lock_name: failed to allocate " + "memory for %.*s\n", name_len, buf); + return ERR_PTR(-ENOMEM); + } + memcpy(l->name, buf, name_len); + if (debug_mask & DEBUG_NEW) + pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name); + wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name); + rb_link_node(&l->node, parent, p); + rb_insert_color(&l->node, &user_wake_locks); + return l; + +bad_arg: + if (debug_mask & DEBUG_ERROR) + pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n", + name_len, buf, arg); + return ERR_PTR(-EINVAL); +} + +ssize_t wake_lock_show( + struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + char *end = buf + PAGE_SIZE; + struct rb_node *n; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { + l = rb_entry(n, struct user_wake_lock, node); + if (wake_lock_active(&l->wake_lock)) + s += scnprintf(s, end - s, "%s ", l->name); + } + s += scnprintf(s, end - s, "\n"); + + mutex_unlock(&tree_lock); + return (s - buf); +} + +ssize_t wake_lock_store( + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + long timeout; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + l = lookup_wake_lock_name(buf, 1, &timeout); + if (IS_ERR(l)) { + n = PTR_ERR(l); + goto bad_name; + } + + if (debug_mask & DEBUG_ACCESS) + pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout); + + if (timeout) + wake_lock_timeout(&l->wake_lock, timeout); + else + wake_lock(&l->wake_lock); +bad_name: + mutex_unlock(&tree_lock); + return n; +} + + +ssize_t wake_unlock_show( + struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + char *end = buf + PAGE_SIZE; + struct rb_node *n; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { + l = rb_entry(n, struct user_wake_lock, node); + if (!wake_lock_active(&l->wake_lock)) + s += scnprintf(s, end - s, "%s ", l->name); + } + s += scnprintf(s, end - s, "\n"); + + mutex_unlock(&tree_lock); + return (s - buf); +} + +ssize_t wake_unlock_store( + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + l = lookup_wake_lock_name(buf, 0, NULL); + if (IS_ERR(l)) { + n = PTR_ERR(l); + goto not_found; + } + + if (debug_mask & DEBUG_ACCESS) + pr_info("wake_unlock_store: %s\n", l->name); + + wake_unlock(&l->wake_lock); +not_found: + mutex_unlock(&tree_lock); + return n; +} + diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c new file mode 100644 index 000000000000..ee9781c5adb2 --- /dev/null +++ b/kernel/power/wakelock.c @@ -0,0 +1,607 @@ +/* kernel/power/wakelock.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
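
As a usage sketch (hypothetical driver and lock name, not part of this patch), the in-kernel API implemented in this file is typically used like this; timeouts are in jiffies:

/*
 * Hypothetical driver-side usage of the wake lock API implemented in
 * this file: hold a suspend wake lock while deferred work completes.
 */
#include <linux/jiffies.h>
#include <linux/wakelock.h>

static struct wake_lock example_wake_lock;	/* illustrative only */

static void example_init(void)
{
	wake_lock_init(&example_wake_lock, WAKE_LOCK_SUSPEND, "example");
}

static void example_event(void)
{
	/* keep the system out of suspend for up to two seconds of processing */
	wake_lock_timeout(&example_wake_lock, 2 * HZ);
}

static void example_work_done(void)
{
	/* release early if the work finishes before the timeout */
	wake_unlock(&example_wake_lock);
}

static void example_exit(void)
{
	wake_lock_destroy(&example_wake_lock);
}
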
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/suspend.h> +#include <linux/syscalls.h> /* sys_sync */ +#include <linux/wakelock.h> +#ifdef CONFIG_WAKELOCK_STAT +#include <linux/proc_fs.h> +#endif +#include "power.h" + +enum { + DEBUG_EXIT_SUSPEND = 1U << 0, + DEBUG_WAKEUP = 1U << 1, + DEBUG_SUSPEND = 1U << 2, + DEBUG_EXPIRE = 1U << 3, + DEBUG_WAKE_LOCK = 1U << 4, +}; +static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define WAKE_LOCK_TYPE_MASK (0x0f) +#define WAKE_LOCK_INITIALIZED (1U << 8) +#define WAKE_LOCK_ACTIVE (1U << 9) +#define WAKE_LOCK_AUTO_EXPIRE (1U << 10) +#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) + +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(inactive_locks); +static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; +static int current_event_num; +struct workqueue_struct *suspend_work_queue; +struct wake_lock main_wake_lock; +suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; +static struct wake_lock unknown_wakeup; + +#ifdef CONFIG_WAKELOCK_STAT +static struct wake_lock deleted_wake_locks; +static ktime_t last_sleep_time_update; +static int wait_for_wakeup; + +int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) +{ + struct timespec ts; + struct timespec kt; + struct timespec tomono; + struct timespec delta; + unsigned long seq; + long timeout; + + if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) + return 0; + do { + seq = read_seqbegin(&xtime_lock); + timeout = lock->expires - jiffies; + if (timeout > 0) + return 0; + kt = current_kernel_time(); + tomono = __get_wall_to_monotonic(); + } while (read_seqretry(&xtime_lock, seq)); + jiffies_to_timespec(-timeout, &delta); + set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, + kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); + *expire_time = timespec_to_ktime(ts); + return 1; +} + + +static int print_lock_stat(struct seq_file *m, struct wake_lock *lock) +{ + int lock_count = lock->stat.count; + int expire_count = lock->stat.expire_count; + ktime_t active_time = ktime_set(0, 0); + ktime_t total_time = lock->stat.total_time; + ktime_t max_time = lock->stat.max_time; + + ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; + if (lock->flags & WAKE_LOCK_ACTIVE) { + ktime_t now, add_time; + int expired = get_expired_time(lock, &now); + if (!expired) + now = ktime_get(); + add_time = ktime_sub(now, lock->stat.last_time); + lock_count++; + if (!expired) + active_time = add_time; + else + expire_count++; + total_time = ktime_add(total_time, add_time); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) + prevent_suspend_time = ktime_add(prevent_suspend_time, + ktime_sub(now, last_sleep_time_update)); + if (add_time.tv64 > max_time.tv64) + max_time = add_time; + } + + return seq_printf(m, + "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n", + lock->name, lock_count, expire_count, + lock->stat.wakeup_count, ktime_to_ns(active_time), + ktime_to_ns(total_time), + ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), + ktime_to_ns(lock->stat.last_time)); +} + +static int wakelock_stats_show(struct seq_file *m, void *unused) +{ + unsigned long irqflags; + struct 
wake_lock *lock; + int ret; + int type; + + spin_lock_irqsave(&list_lock, irqflags); + + ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since" + "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); + list_for_each_entry(lock, &inactive_locks, link) + ret = print_lock_stat(m, lock); + for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { + list_for_each_entry(lock, &active_wake_locks[type], link) + ret = print_lock_stat(m, lock); + } + spin_unlock_irqrestore(&list_lock, irqflags); + return 0; +} + +static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) +{ + ktime_t duration; + ktime_t now; + if (!(lock->flags & WAKE_LOCK_ACTIVE)) + return; + if (get_expired_time(lock, &now)) + expired = 1; + else + now = ktime_get(); + lock->stat.count++; + if (expired) + lock->stat.expire_count++; + duration = ktime_sub(now, lock->stat.last_time); + lock->stat.total_time = ktime_add(lock->stat.total_time, duration); + if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time)) + lock->stat.max_time = duration; + lock->stat.last_time = ktime_get(); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { + duration = ktime_sub(now, last_sleep_time_update); + lock->stat.prevent_suspend_time = ktime_add( + lock->stat.prevent_suspend_time, duration); + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; + } +} + +static void update_sleep_wait_stats_locked(int done) +{ + struct wake_lock *lock; + ktime_t now, etime, elapsed, add; + int expired; + + now = ktime_get(); + elapsed = ktime_sub(now, last_sleep_time_update); + list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) { + expired = get_expired_time(lock, &etime); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { + if (expired) + add = ktime_sub(etime, last_sleep_time_update); + else + add = elapsed; + lock->stat.prevent_suspend_time = ktime_add( + lock->stat.prevent_suspend_time, add); + } + if (done || expired) + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; + else + lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND; + } + last_sleep_time_update = now; +} +#endif + + +static void expire_wake_lock(struct wake_lock *lock) +{ +#ifdef CONFIG_WAKELOCK_STAT + wake_unlock_stat_locked(lock, 1); +#endif + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); + list_del(&lock->link); + list_add(&lock->link, &inactive_locks); + if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE)) + pr_info("expired wake lock %s\n", lock->name); +} + +/* Caller must acquire the list_lock spinlock */ +static void print_active_locks(int type) +{ + struct wake_lock *lock; + bool print_expired = true; + + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + list_for_each_entry(lock, &active_wake_locks[type], link) { + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { + long timeout = lock->expires - jiffies; + if (timeout > 0) + pr_info("active wake lock %s, time left %ld\n", + lock->name, timeout); + else if (print_expired) + pr_info("wake lock %s, expired\n", lock->name); + } else { + pr_info("active wake lock %s\n", lock->name); + if (!(debug_mask & DEBUG_EXPIRE)) + print_expired = false; + } + } +} + +static long has_wake_lock_locked(int type) +{ + struct wake_lock *lock, *n; + long max_timeout = 0; + + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) { + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { + long timeout = lock->expires - jiffies; + if (timeout <= 0) + expire_wake_lock(lock); + else if (timeout > max_timeout) + max_timeout = timeout; + } else + return -1; + } + return max_timeout; +} + +long 
has_wake_lock(int type) +{ + long ret; + unsigned long irqflags; + spin_lock_irqsave(&list_lock, irqflags); + ret = has_wake_lock_locked(type); + if (ret && (debug_mask & DEBUG_SUSPEND) && type == WAKE_LOCK_SUSPEND) + print_active_locks(type); + spin_unlock_irqrestore(&list_lock, irqflags); + return ret; +} + +static void suspend(struct work_struct *work) +{ + int ret; + int entry_event_num; + + if (has_wake_lock(WAKE_LOCK_SUSPEND)) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("suspend: abort suspend\n"); + return; + } + + entry_event_num = current_event_num; + sys_sync(); + if (debug_mask & DEBUG_SUSPEND) + pr_info("suspend: enter suspend\n"); + ret = pm_suspend(requested_suspend_state); + if (debug_mask & DEBUG_EXIT_SUSPEND) { + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info("suspend: exit suspend, ret = %d " + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + } + if (current_event_num == entry_event_num) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("suspend: pm_suspend returned with no event\n"); + wake_lock_timeout(&unknown_wakeup, HZ / 2); + } +} +static DECLARE_WORK(suspend_work, suspend); + +static void expire_wake_locks(unsigned long data) +{ + long has_lock; + unsigned long irqflags; + if (debug_mask & DEBUG_EXPIRE) + pr_info("expire_wake_locks: start\n"); + spin_lock_irqsave(&list_lock, irqflags); + if (debug_mask & DEBUG_SUSPEND) + print_active_locks(WAKE_LOCK_SUSPEND); + has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND); + if (debug_mask & DEBUG_EXPIRE) + pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock); + if (has_lock == 0) + queue_work(suspend_work_queue, &suspend_work); + spin_unlock_irqrestore(&list_lock, irqflags); +} +static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0); + +static int power_suspend_late(struct device *dev) +{ + int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? 
-EAGAIN : 0; +#ifdef CONFIG_WAKELOCK_STAT + wait_for_wakeup = 1; +#endif + if (debug_mask & DEBUG_SUSPEND) + pr_info("power_suspend_late return %d\n", ret); + return ret; +} + +static struct dev_pm_ops power_driver_pm_ops = { + .suspend_noirq = power_suspend_late, +}; + +static struct platform_driver power_driver = { + .driver.name = "power", + .driver.pm = &power_driver_pm_ops, +}; +static struct platform_device power_device = { + .name = "power", +}; + +void wake_lock_init(struct wake_lock *lock, int type, const char *name) +{ + unsigned long irqflags = 0; + + if (name) + lock->name = name; + BUG_ON(!lock->name); + + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock_init name=%s\n", lock->name); +#ifdef CONFIG_WAKELOCK_STAT + lock->stat.count = 0; + lock->stat.expire_count = 0; + lock->stat.wakeup_count = 0; + lock->stat.total_time = ktime_set(0, 0); + lock->stat.prevent_suspend_time = ktime_set(0, 0); + lock->stat.max_time = ktime_set(0, 0); + lock->stat.last_time = ktime_set(0, 0); +#endif + lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED; + + INIT_LIST_HEAD(&lock->link); + spin_lock_irqsave(&list_lock, irqflags); + list_add(&lock->link, &inactive_locks); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_lock_init); + +void wake_lock_destroy(struct wake_lock *lock) +{ + unsigned long irqflags; + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock_destroy name=%s\n", lock->name); + spin_lock_irqsave(&list_lock, irqflags); + lock->flags &= ~WAKE_LOCK_INITIALIZED; +#ifdef CONFIG_WAKELOCK_STAT + if (lock->stat.count) { + deleted_wake_locks.stat.count += lock->stat.count; + deleted_wake_locks.stat.expire_count += lock->stat.expire_count; + deleted_wake_locks.stat.total_time = + ktime_add(deleted_wake_locks.stat.total_time, + lock->stat.total_time); + deleted_wake_locks.stat.prevent_suspend_time = + ktime_add(deleted_wake_locks.stat.prevent_suspend_time, + lock->stat.prevent_suspend_time); + deleted_wake_locks.stat.max_time = + ktime_add(deleted_wake_locks.stat.max_time, + lock->stat.max_time); + } +#endif + list_del(&lock->link); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_lock_destroy); + +static void wake_lock_internal( + struct wake_lock *lock, long timeout, int has_timeout) +{ + int type; + unsigned long irqflags; + long expire_in; + + spin_lock_irqsave(&list_lock, irqflags); + type = lock->flags & WAKE_LOCK_TYPE_MASK; + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED)); +#ifdef CONFIG_WAKELOCK_STAT + if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) { + if (debug_mask & DEBUG_WAKEUP) + pr_info("wakeup wake lock: %s\n", lock->name); + wait_for_wakeup = 0; + lock->stat.wakeup_count++; + } + if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) && + (long)(lock->expires - jiffies) <= 0) { + wake_unlock_stat_locked(lock, 0); + lock->stat.last_time = ktime_get(); + } +#endif + if (!(lock->flags & WAKE_LOCK_ACTIVE)) { + lock->flags |= WAKE_LOCK_ACTIVE; +#ifdef CONFIG_WAKELOCK_STAT + lock->stat.last_time = ktime_get(); +#endif + } + list_del(&lock->link); + if (has_timeout) { + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n", + lock->name, type, timeout / HZ, + (timeout % HZ) * MSEC_PER_SEC / HZ); + lock->expires = jiffies + timeout; + lock->flags |= WAKE_LOCK_AUTO_EXPIRE; + list_add_tail(&lock->link, &active_wake_locks[type]); + } else { + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock: %s, type %d\n", lock->name, type); + lock->expires = 
LONG_MAX; + lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE; + list_add(&lock->link, &active_wake_locks[type]); + } + if (type == WAKE_LOCK_SUSPEND) { + current_event_num++; +#ifdef CONFIG_WAKELOCK_STAT + if (lock == &main_wake_lock) + update_sleep_wait_stats_locked(1); + else if (!wake_lock_active(&main_wake_lock)) + update_sleep_wait_stats_locked(0); +#endif + if (has_timeout) + expire_in = has_wake_lock_locked(type); + else + expire_in = -1; + if (expire_in > 0) { + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_lock: %s, start expire timer, " + "%ld\n", lock->name, expire_in); + mod_timer(&expire_timer, jiffies + expire_in); + } else { + if (del_timer(&expire_timer)) + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_lock: %s, stop expire timer\n", + lock->name); + if (expire_in == 0) + queue_work(suspend_work_queue, &suspend_work); + } + } + spin_unlock_irqrestore(&list_lock, irqflags); +} + +void wake_lock(struct wake_lock *lock) +{ + wake_lock_internal(lock, 0, 0); +} +EXPORT_SYMBOL(wake_lock); + +void wake_lock_timeout(struct wake_lock *lock, long timeout) +{ + wake_lock_internal(lock, timeout, 1); +} +EXPORT_SYMBOL(wake_lock_timeout); + +void wake_unlock(struct wake_lock *lock) +{ + int type; + unsigned long irqflags; + spin_lock_irqsave(&list_lock, irqflags); + type = lock->flags & WAKE_LOCK_TYPE_MASK; +#ifdef CONFIG_WAKELOCK_STAT + wake_unlock_stat_locked(lock, 0); +#endif + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_unlock: %s\n", lock->name); + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); + list_del(&lock->link); + list_add(&lock->link, &inactive_locks); + if (type == WAKE_LOCK_SUSPEND) { + long has_lock = has_wake_lock_locked(type); + if (has_lock > 0) { + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_unlock: %s, start expire timer, " + "%ld\n", lock->name, has_lock); + mod_timer(&expire_timer, jiffies + has_lock); + } else { + if (del_timer(&expire_timer)) + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_unlock: %s, stop expire " + "timer\n", lock->name); + if (has_lock == 0) + queue_work(suspend_work_queue, &suspend_work); + } + if (lock == &main_wake_lock) { + if (debug_mask & DEBUG_SUSPEND) + print_active_locks(WAKE_LOCK_SUSPEND); +#ifdef CONFIG_WAKELOCK_STAT + update_sleep_wait_stats_locked(0); +#endif + } + } + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_unlock); + +int wake_lock_active(struct wake_lock *lock) +{ + return !!(lock->flags & WAKE_LOCK_ACTIVE); +} +EXPORT_SYMBOL(wake_lock_active); + +static int wakelock_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, wakelock_stats_show, NULL); +} + +static const struct file_operations wakelock_stats_fops = { + .owner = THIS_MODULE, + .open = wakelock_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init wakelocks_init(void) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++) + INIT_LIST_HEAD(&active_wake_locks[i]); + +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND, + "deleted_wake_locks"); +#endif + wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); + wake_lock(&main_wake_lock); + wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); + + ret = platform_device_register(&power_device); + if (ret) { + pr_err("wakelocks_init: platform_device_register failed\n"); + goto err_platform_device_register; + } + ret = platform_driver_register(&power_driver); + if (ret) { + pr_err("wakelocks_init: platform_driver_register 
failed\n"); + goto err_platform_driver_register; + } + + suspend_work_queue = create_singlethread_workqueue("suspend"); + if (suspend_work_queue == NULL) { + ret = -ENOMEM; + goto err_suspend_work_queue; + } + +#ifdef CONFIG_WAKELOCK_STAT + proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops); +#endif + + return 0; + +err_suspend_work_queue: + platform_driver_unregister(&power_driver); +err_platform_driver_register: + platform_device_unregister(&power_device); +err_platform_device_register: + wake_lock_destroy(&unknown_wakeup); + wake_lock_destroy(&main_wake_lock); +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_destroy(&deleted_wake_locks); +#endif + return ret; +} + +static void __exit wakelocks_exit(void) +{ +#ifdef CONFIG_WAKELOCK_STAT + remove_proc_entry("wakelocks", NULL); +#endif + destroy_workqueue(suspend_work_queue); + platform_driver_unregister(&power_driver); + platform_device_unregister(&power_device); + wake_lock_destroy(&unknown_wakeup); + wake_lock_destroy(&main_wake_lock); +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_destroy(&deleted_wake_locks); +#endif +} + +core_initcall(wakelocks_init); +module_exit(wakelocks_exit); diff --git a/kernel/printk.c b/kernel/printk.c index 8fe465ac008a..48ce53f5bb0a 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -57,6 +57,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) +#ifdef CONFIG_DEBUG_LL +extern void printascii(char *); +#endif + /* printk's without a loglevel use this.. */ #define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */ @@ -261,6 +265,68 @@ static inline void boot_delay_msec(void) } #endif +/* + * Return the number of unread characters in the log buffer. + */ +static int log_buf_get_len(void) +{ + return logged_chars; +} + +/* + * Clears the ring-buffer + */ +void log_buf_clear(void) +{ + logged_chars = 0; +} + +/* + * Copy a range of characters from the log buffer. + */ +int log_buf_copy(char *dest, int idx, int len) +{ + int ret, max; + bool took_lock = false; + + if (!oops_in_progress) { + spin_lock_irq(&logbuf_lock); + took_lock = true; + } + + max = log_buf_get_len(); + if (idx < 0 || idx >= max) { + ret = -1; + } else { + if (len > max - idx) + len = max - idx; + ret = len; + idx += (log_end - max); + while (len-- > 0) + dest[len] = LOG_BUF(idx + len); + } + + if (took_lock) + spin_unlock_irq(&logbuf_lock); + + return ret; +} + +/* + * Commands to do_syslog: + * + * 0 -- Close the log. Currently a NOP. + * 1 -- Open the log. Currently a NOP. + * 2 -- Read from the log. + * 3 -- Read all messages remaining in the ring buffer. + * 4 -- Read and clear all messages remaining in the ring buffer + * 5 -- Clear ring buffer. 
+ * 6 -- Disable printk's to console + * 7 -- Enable printk's to console + * 8 -- Set level of messages printed to console + * 9 -- Return number of unread characters in the log buffer + * 10 -- Return size of the log buffer + */ int do_syslog(int type, char __user *buf, int len, bool from_file) { unsigned i, j, limit, count; @@ -736,6 +802,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) printed_len += vscnprintf(printk_buf + printed_len, sizeof(printk_buf) - printed_len, fmt, args); +#ifdef CONFIG_DEBUG_LL + printascii(printk_buf); +#endif p = printk_buf; diff --git a/kernel/sched.c b/kernel/sched.c index dc85ceb90832..17c6925e2016 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -72,6 +72,7 @@ #include <linux/ctype.h> #include <linux/ftrace.h> #include <linux/slab.h> +#include <linux/cpuacct.h> #include <asm/tlb.h> #include <asm/irq_regs.h> @@ -5254,7 +5255,7 @@ void sched_show_task(struct task_struct *p) unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk(KERN_INFO "%-13.13s %c", p->comm, + printk(KERN_INFO "%-15.15s %c", p->comm, state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); #if BITS_PER_LONG == 32 if (state == TASK_RUNNING) @@ -7891,13 +7892,24 @@ static inline int preempt_count_equals(int preempt_offset) return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); } +static int __might_sleep_init_called; +int __init __might_sleep_init(void) +{ + __might_sleep_init_called = 1; + return 0; +} +early_initcall(__might_sleep_init); + void __might_sleep(const char *file, int line, int preempt_offset) { #ifdef in_atomic static unsigned long prev_jiffy; /* ratelimiting */ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || - system_state != SYSTEM_RUNNING || oops_in_progress) + oops_in_progress) + return; + if (system_state != SYSTEM_RUNNING && + (!__might_sleep_init_called || system_state != SYSTEM_BOOTING)) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; @@ -8297,6 +8309,11 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); +#ifdef CONFIG_FAIR_GROUP_SCHED + if (tsk->sched_class->prep_move_group) + tsk->sched_class->prep_move_group(tsk, on_rq); +#endif + set_task_rq(tsk, task_cpu(tsk)); #ifdef CONFIG_FAIR_GROUP_SCHED @@ -8714,6 +8731,15 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) static int cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { + if ((current != tsk) && (!capable(CAP_SYS_NICE))) { + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(tsk); + + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) return -EINVAL; @@ -8858,8 +8884,30 @@ struct cpuacct { u64 __percpu *cpuusage; struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; struct cpuacct *parent; + struct cpuacct_charge_calls *cpufreq_fn; + void *cpuacct_data; }; +static struct cpuacct *cpuacct_root; + +/* Default calls for cpufreq accounting */ +static struct cpuacct_charge_calls *cpuacct_cpufreq; +int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn) +{ + cpuacct_cpufreq = fn; + + /* + * Root node is created before platform can register callbacks, + * initalize here. 
+ */ + if (cpuacct_root && fn) { + cpuacct_root->cpufreq_fn = fn; + if (fn->init) + fn->init(&cpuacct_root->cpuacct_data); + } + return 0; +} + struct cgroup_subsys cpuacct_subsys; /* return cpu accounting group corresponding to this container */ @@ -8894,8 +8942,16 @@ static struct cgroup_subsys_state *cpuacct_create( if (percpu_counter_init(&ca->cpustat[i], 0)) goto out_free_counters; + ca->cpufreq_fn = cpuacct_cpufreq; + + /* If available, have platform code initalize cpu frequency table */ + if (ca->cpufreq_fn && ca->cpufreq_fn->init) + ca->cpufreq_fn->init(&ca->cpuacct_data); + if (cgrp->parent) ca->parent = cgroup_ca(cgrp->parent); + else + cpuacct_root = ca; return &ca->css; @@ -9023,6 +9079,32 @@ static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, return 0; } +static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct cpuacct *ca = cgroup_ca(cgrp); + if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show) + ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb); + + return 0; +} + +/* return total cpu power usage (milliWatt second) of a group */ +static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft) +{ + int i; + struct cpuacct *ca = cgroup_ca(cgrp); + u64 totalpower = 0; + + if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage) + for_each_present_cpu(i) { + totalpower += ca->cpufreq_fn->power_usage( + ca->cpuacct_data); + } + + return totalpower; +} + static struct cftype files[] = { { .name = "usage", @@ -9037,6 +9119,14 @@ static struct cftype files[] = { .name = "stat", .read_map = cpuacct_stats_show, }, + { + .name = "cpufreq", + .read_map = cpuacct_cpufreq_show, + }, + { + .name = "power", + .read_u64 = cpuacct_powerusage_read + }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) @@ -9066,6 +9156,10 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) for (; ca; ca = ca->parent) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; + + /* Call back into platform code to account for CPU speeds */ + if (ca->cpufreq_fn && ca->cpufreq_fn->charge) + ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu); } rcu_read_unlock(); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index db3f674ca49d..1bb879013173 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -802,6 +802,8 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { + u64 min_vruntime; + /* * Update run-time statistics of the 'current'. */ @@ -826,6 +828,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); account_entity_dequeue(cfs_rq, se); + + min_vruntime = cfs_rq->min_vruntime; update_min_vruntime(cfs_rq); /* @@ -834,7 +838,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * movement in our normalized position. 
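
The cpuacct changes to kernel/sched.c above only call into platform hooks; a platform supplies them through cpuacct_register_cpufreq(). The sketch below is an approximation and not part of this patch: the callback prototypes are inferred from the call sites (init, charge, cpufreq_show, power_usage), and the authoritative struct cpuacct_charge_calls definition lives in <linux/cpuacct.h>, which is not shown in this hunk.

/*
 * Sketch of a platform registering the cpuacct cpufreq/power hooks.
 * Prototypes are approximated from the kernel/sched.c call sites.
 */
#include <linux/cpuacct.h>
#include <linux/slab.h>

/* hypothetical per-cgroup state; a real platform would track time per OPP */
struct example_freq_stats {
	u64 time_in_state[4];
};

static void example_cpuacct_init(void **cpuacct_data)
{
	*cpuacct_data = kzalloc(sizeof(struct example_freq_stats), GFP_KERNEL);
}

static void example_cpuacct_charge(void *cpuacct_data, u64 cputime,
				   unsigned int cpu)
{
	/* attribute 'cputime' to whatever frequency 'cpu' is running at */
}

static struct cpuacct_charge_calls example_cpuacct_calls = {
	.init	= example_cpuacct_init,
	.charge	= example_cpuacct_charge,
	/* .cpufreq_show and .power_usage back the cpufreq and power files */
};

static int example_register_cpuacct_hooks(void)
{
	/* called from platform setup code in this sketch */
	return cpuacct_register_cpufreq(&example_cpuacct_calls);
}
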
*/ if (!(flags & DEQUEUE_SLEEP)) - se->vruntime -= cfs_rq->min_vruntime; + se->vruntime -= min_vruntime; } /* @@ -3827,10 +3831,21 @@ static void set_curr_task_fair(struct rq *rq) static void moved_group_fair(struct task_struct *p, int on_rq) { struct cfs_rq *cfs_rq = task_cfs_rq(p); + struct sched_entity *se = &p->se; update_curr(cfs_rq); if (!on_rq) - place_entity(cfs_rq, &p->se, 1); + se->vruntime += cfs_rq->min_vruntime; +} + +static void prep_move_group_fair(struct task_struct *p, int on_rq) +{ + struct cfs_rq *cfs_rq = task_cfs_rq(p); + struct sched_entity *se = &p->se; + + /* normalize the runtime of a sleeping task before moving it */ + if (!on_rq) + se->vruntime -= cfs_rq->min_vruntime; } #endif @@ -3883,6 +3898,7 @@ static const struct sched_class fair_sched_class = { #ifdef CONFIG_FAIR_GROUP_SCHED .moved_group = moved_group_fair, + .prep_move_group = prep_move_group_fair, #endif }; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f88552c6d227..aadaee9bcb18 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -94,6 +94,7 @@ extern char core_pattern[]; extern unsigned int core_pipe_limit; extern int pid_max; extern int min_free_kbytes; +extern int min_free_order_shift; extern int pid_max_min, pid_max_max; extern int sysctl_drop_caches; extern int percpu_pagelist_fraction; @@ -1157,6 +1158,13 @@ static struct ctl_table vm_table[] = { .extra1 = &zero, }, { + .procname = "min_free_order_shift", + .data = &min_free_order_shift, + .maxlen = sizeof(min_free_order_shift), + .mode = 0644, + .proc_handler = &proc_dointvec + }, + { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1b4afd2e6ca0..3ebdd8809b18 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -604,8 +604,9 @@ config DEBUG_LOCKING_API_SELFTESTS mutexes and rwsems. config STACKTRACE - bool + bool "Stacktrace" depends on STACKTRACE_SUPPORT + default y config DEBUG_KOBJECT bool "kobject debugging" diff --git a/mm/Makefile b/mm/Makefile index 34b2546a9e37..a62e107d7e50 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o +obj-$(CONFIG_ASHMEM) += ashmem.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_COMPACTION) += compaction.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o diff --git a/mm/ashmem.c b/mm/ashmem.c new file mode 100644 index 000000000000..f92eb34a1482 --- /dev/null +++ b/mm/ashmem.c @@ -0,0 +1,743 @@ +/* mm/ashmem.c +** +** Anonymous Shared Memory Subsystem, ashmem +** +** Copyright (C) 2008 Google, Inc. +** +** Robert Love <rlove@google.com> +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. 
+*/ + +#include <linux/module.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/security.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/uaccess.h> +#include <linux/personality.h> +#include <linux/bitops.h> +#include <linux/mutex.h> +#include <linux/shmem_fs.h> +#include <linux/ashmem.h> + +#define ASHMEM_NAME_PREFIX "dev/ashmem/" +#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) +#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) + +/* + * ashmem_area - anonymous shared memory area + * Lifecycle: From our parent file's open() until its release() + * Locking: Protected by `ashmem_mutex' + * Big Note: Mappings do NOT pin this structure; it dies on close() + */ +struct ashmem_area { + char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */ + struct list_head unpinned_list; /* list of all ashmem areas */ + struct file *file; /* the shmem-based backing file */ + size_t size; /* size of the mapping, in bytes */ + unsigned long prot_mask; /* allowed prot bits, as vm_flags */ +}; + +/* + * ashmem_range - represents an interval of unpinned (evictable) pages + * Lifecycle: From unpin to pin + * Locking: Protected by `ashmem_mutex' + */ +struct ashmem_range { + struct list_head lru; /* entry in LRU list */ + struct list_head unpinned; /* entry in its area's unpinned list */ + struct ashmem_area *asma; /* associated area */ + size_t pgstart; /* starting page, inclusive */ + size_t pgend; /* ending page, inclusive */ + unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */ +}; + +/* LRU list of unpinned pages, protected by ashmem_mutex */ +static LIST_HEAD(ashmem_lru_list); + +/* Count of pages on our LRU list, protected by ashmem_mutex */ +static unsigned long lru_count; + +/* + * ashmem_mutex - protects the list of and each individual ashmem_area + * + * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem + */ +static DEFINE_MUTEX(ashmem_mutex); + +static struct kmem_cache *ashmem_area_cachep __read_mostly; +static struct kmem_cache *ashmem_range_cachep __read_mostly; + +#define range_size(range) \ + ((range)->pgend - (range)->pgstart + 1) + +#define range_on_lru(range) \ + ((range)->purged == ASHMEM_NOT_PURGED) + +#define page_range_subsumes_range(range, start, end) \ + (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) + +#define page_range_subsumed_by_range(range, start, end) \ + (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) + +#define page_in_range(range, page) \ + (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) + +#define page_range_in_range(range, start, end) \ + (page_in_range(range, start) || page_in_range(range, end) || \ + page_range_subsumes_range(range, start, end)) + +#define range_before_page(range, page) \ + ((range)->pgend < (page)) + +#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) + +static inline void lru_add(struct ashmem_range *range) +{ + list_add_tail(&range->lru, &ashmem_lru_list); + lru_count += range_size(range); +} + +static inline void lru_del(struct ashmem_range *range) +{ + list_del(&range->lru); + lru_count -= range_size(range); +} + +/* + * range_alloc - allocate and initialize a new ashmem_range structure + * + * 'asma' - associated ashmem_area + * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list + * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) + * 'start' - starting page, inclusive + * 'end' - ending page, inclusive + * + * Caller must hold 
ashmem_mutex. + */ +static int range_alloc(struct ashmem_area *asma, + struct ashmem_range *prev_range, unsigned int purged, + size_t start, size_t end) +{ + struct ashmem_range *range; + + range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); + if (unlikely(!range)) + return -ENOMEM; + + range->asma = asma; + range->pgstart = start; + range->pgend = end; + range->purged = purged; + + list_add_tail(&range->unpinned, &prev_range->unpinned); + + if (range_on_lru(range)) + lru_add(range); + + return 0; +} + +static void range_del(struct ashmem_range *range) +{ + list_del(&range->unpinned); + if (range_on_lru(range)) + lru_del(range); + kmem_cache_free(ashmem_range_cachep, range); +} + +/* + * range_shrink - shrinks a range + * + * Caller must hold ashmem_mutex. + */ +static inline void range_shrink(struct ashmem_range *range, + size_t start, size_t end) +{ + size_t pre = range_size(range); + + range->pgstart = start; + range->pgend = end; + + if (range_on_lru(range)) + lru_count -= pre - range_size(range); +} + +static int ashmem_open(struct inode *inode, struct file *file) +{ + struct ashmem_area *asma; + int ret; + + ret = generic_file_open(inode, file); + if (unlikely(ret)) + return ret; + + asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); + if (unlikely(!asma)) + return -ENOMEM; + + INIT_LIST_HEAD(&asma->unpinned_list); + memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); + asma->prot_mask = PROT_MASK; + file->private_data = asma; + + return 0; +} + +static int ashmem_release(struct inode *ignored, struct file *file) +{ + struct ashmem_area *asma = file->private_data; + struct ashmem_range *range, *next; + + mutex_lock(&ashmem_mutex); + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) + range_del(range); + mutex_unlock(&ashmem_mutex); + + if (asma->file) + fput(asma->file); + kmem_cache_free(ashmem_area_cachep, asma); + + return 0; +} + +static ssize_t ashmem_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. 
*/ + if (asma->size == 0) { + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->read(asma->file, buf, len, pos); + if (ret < 0) { + goto out; + } + + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ + struct ashmem_area *asma = file->private_data; + int ret; + + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { + ret = -EINVAL; + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->llseek(asma->file, offset, origin); + if (ret < 0) { + goto out; + } + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static inline unsigned long +calc_vm_may_flags(unsigned long prot) +{ + return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) | + _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); +} + +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* user needs to SET_SIZE before mapping */ + if (unlikely(!asma->size)) { + ret = -EINVAL; + goto out; + } + + /* requested protection bits must match our allowed protection mask */ + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & + calc_vm_prot_bits(PROT_MASK))) { + ret = -EPERM; + goto out; + } + vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + + if (!asma->file) { + char *name = ASHMEM_NAME_DEF; + struct file *vmfile; + + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') + name = asma->name; + + /* ... and allocate the backing shmem file */ + vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); + if (unlikely(IS_ERR(vmfile))) { + ret = PTR_ERR(vmfile); + goto out; + } + asma->file = vmfile; + } + get_file(asma->file); + + if (vma->vm_flags & VM_SHARED) + shmem_set_file(vma, asma->file); + else { + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; + } + vma->vm_flags |= VM_CAN_NONLINEAR; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +/* + * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab + * + * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how + * many objects (pages) we have in total. + * + * 'gfp_mask' is the mask of the allocation that got us into this mess. + * + * Return value is the number of objects (pages) remaining, or -1 if we cannot + * proceed without risk of deadlock (due to gfp_mask). + * + * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. 
+ */ +static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) +{ + struct ashmem_range *range, *next; + + /* We might recurse into filesystem code, so bail out if necessary */ + if (nr_to_scan && !(gfp_mask & __GFP_FS)) + return -1; + if (!nr_to_scan) + return lru_count; + + mutex_lock(&ashmem_mutex); + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { + struct inode *inode = range->asma->file->f_dentry->d_inode; + loff_t start = range->pgstart * PAGE_SIZE; + loff_t end = (range->pgend + 1) * PAGE_SIZE - 1; + + vmtruncate_range(inode, start, end); + range->purged = ASHMEM_WAS_PURGED; + lru_del(range); + + nr_to_scan -= range_size(range); + if (nr_to_scan <= 0) + break; + } + mutex_unlock(&ashmem_mutex); + + return lru_count; +} + +static struct shrinker ashmem_shrinker = { + .shrink = ashmem_shrink, + .seeks = DEFAULT_SEEKS * 4, +}; + +static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) +{ + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* the user can only remove, not add, protection bits */ + if (unlikely((asma->prot_mask & prot) != prot)) { + ret = -EINVAL; + goto out; + } + + /* does the application expect PROT_READ to imply PROT_EXEC? */ + if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + asma->prot_mask = prot; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static int set_name(struct ashmem_area *asma, void __user *name) +{ + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* cannot change an existing mapping's name */ + if (unlikely(asma->file)) { + ret = -EINVAL; + goto out; + } + + if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN, + name, ASHMEM_NAME_LEN))) + ret = -EFAULT; + asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; + +out: + mutex_unlock(&ashmem_mutex); + + return ret; +} + +static int get_name(struct ashmem_area *asma, void __user *name) +{ + int ret = 0; + + mutex_lock(&ashmem_mutex); + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { + size_t len; + + /* + * Copying only `len', instead of ASHMEM_NAME_LEN, bytes + * prevents us from revealing one user's stack to another. + */ + len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; + if (unlikely(copy_to_user(name, + asma->name + ASHMEM_NAME_PREFIX_LEN, len))) + ret = -EFAULT; + } else { + if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF, + sizeof(ASHMEM_NAME_DEF)))) + ret = -EFAULT; + } + mutex_unlock(&ashmem_mutex); + + return ret; +} + +/* + * ashmem_pin - pin the given ashmem region, returning whether it was + * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + int ret = ASHMEM_NOT_PURGED; + + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* moved past last applicable page; we can short circuit */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to pin pages that span multiple ranges, + * or to pin pages that aren't even unpinned, so this is messy. + * + * Four cases: + * 1. The requested range subsumes an existing range, so we + * just remove the entire matching range. + * 2. The requested range overlaps the start of an existing + * range, so we just update that range. + * 3. The requested range overlaps the end of an existing + * range, so we just update that range. + * 4. 
The requested range punches a hole in an existing range, + * so we have to update one side of the range and then + * create a new range for the other side. + */ + if (page_range_in_range(range, pgstart, pgend)) { + ret |= range->purged; + + /* Case #1: Easy. Just nuke the whole thing. */ + if (page_range_subsumes_range(range, pgstart, pgend)) { + range_del(range); + continue; + } + + /* Case #2: We overlap from the start, so adjust it */ + if (range->pgstart >= pgstart) { + range_shrink(range, pgend + 1, range->pgend); + continue; + } + + /* Case #3: We overlap from the rear, so adjust it */ + if (range->pgend <= pgend) { + range_shrink(range, range->pgstart, pgstart-1); + continue; + } + + /* + * Case #4: We eat a chunk out of the middle. A bit + * more complicated, we allocate a new range for the + * second half and adjust the first chunk's endpoint. + */ + range_alloc(asma, range, range->purged, + pgend + 1, range->pgend); + range_shrink(range, range->pgstart, pgstart - 1); + break; + } + } + + return ret; +} + +/* + * ashmem_unpin - unpin the given range of pages. Returns zero on success. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + unsigned int purged = ASHMEM_NOT_PURGED; + +restart: + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* short circuit: this is our insertion point */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to unpin pages that are already entirely + * or partially pinned. We handle those two cases here. + */ + if (page_range_subsumed_by_range(range, pgstart, pgend)) + return 0; + if (page_range_in_range(range, pgstart, pgend)) { + pgstart = min_t(size_t, range->pgstart, pgstart), + pgend = max_t(size_t, range->pgend, pgend); + purged |= range->purged; + range_del(range); + goto restart; + } + } + + return range_alloc(asma, range, purged, pgstart, pgend); +} + +/* + * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the + * given interval are unpinned and ASHMEM_IS_PINNED otherwise. + * + * Caller must hold ashmem_mutex. 
+ */ +static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, + size_t pgend) +{ + struct ashmem_range *range; + int ret = ASHMEM_IS_PINNED; + + list_for_each_entry(range, &asma->unpinned_list, unpinned) { + if (range_before_page(range, pgstart)) + break; + if (page_range_in_range(range, pgstart, pgend)) { + ret = ASHMEM_IS_UNPINNED; + break; + } + } + + return ret; +} + +static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, + void __user *p) +{ + struct ashmem_pin pin; + size_t pgstart, pgend; + int ret = -EINVAL; + + if (unlikely(!asma->file)) + return -EINVAL; + + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) + return -EFAULT; + + /* per custom, you can pass zero for len to mean "everything onward" */ + if (!pin.len) + pin.len = PAGE_ALIGN(asma->size) - pin.offset; + + if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) + return -EINVAL; + + if (unlikely(((__u32) -1) - pin.offset < pin.len)) + return -EINVAL; + + if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) + return -EINVAL; + + pgstart = pin.offset / PAGE_SIZE; + pgend = pgstart + (pin.len / PAGE_SIZE) - 1; + + mutex_lock(&ashmem_mutex); + + switch (cmd) { + case ASHMEM_PIN: + ret = ashmem_pin(asma, pgstart, pgend); + break; + case ASHMEM_UNPIN: + ret = ashmem_unpin(asma, pgstart, pgend); + break; + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_get_pin_status(asma, pgstart, pgend); + break; + } + + mutex_unlock(&ashmem_mutex); + + return ret; +} + +static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct ashmem_area *asma = file->private_data; + long ret = -ENOTTY; + + switch (cmd) { + case ASHMEM_SET_NAME: + ret = set_name(asma, (void __user *) arg); + break; + case ASHMEM_GET_NAME: + ret = get_name(asma, (void __user *) arg); + break; + case ASHMEM_SET_SIZE: + ret = -EINVAL; + if (!asma->file) { + ret = 0; + asma->size = (size_t) arg; + } + break; + case ASHMEM_GET_SIZE: + ret = asma->size; + break; + case ASHMEM_SET_PROT_MASK: + ret = set_prot_mask(asma, arg); + break; + case ASHMEM_GET_PROT_MASK: + ret = asma->prot_mask; + break; + case ASHMEM_PIN: + case ASHMEM_UNPIN: + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg); + break; + case ASHMEM_PURGE_ALL_CACHES: + ret = -EPERM; + if (capable(CAP_SYS_ADMIN)) { + ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); + ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); + } + break; + } + + return ret; +} + +static struct file_operations ashmem_fops = { + .owner = THIS_MODULE, + .open = ashmem_open, + .release = ashmem_release, + .read = ashmem_read, + .llseek = ashmem_llseek, + .mmap = ashmem_mmap, + .unlocked_ioctl = ashmem_ioctl, + .compat_ioctl = ashmem_ioctl, +}; + +static struct miscdevice ashmem_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ashmem", + .fops = &ashmem_fops, +}; + +static int __init ashmem_init(void) +{ + int ret; + + ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", + sizeof(struct ashmem_area), + 0, 0, NULL); + if (unlikely(!ashmem_area_cachep)) { + printk(KERN_ERR "ashmem: failed to create slab cache\n"); + return -ENOMEM; + } + + ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", + sizeof(struct ashmem_range), + 0, 0, NULL); + if (unlikely(!ashmem_range_cachep)) { + printk(KERN_ERR "ashmem: failed to create slab cache\n"); + return -ENOMEM; + } + + ret = misc_register(&ashmem_misc); + if (unlikely(ret)) { + printk(KERN_ERR "ashmem: failed to register misc device!\n"); + return ret; + } + + 
register_shrinker(&ashmem_shrinker); + + printk(KERN_INFO "ashmem: initialized\n"); + + return 0; +} + +static void __exit ashmem_exit(void) +{ + int ret; + + unregister_shrinker(&ashmem_shrinker); + + ret = misc_deregister(&ashmem_misc); + if (unlikely(ret)) + printk(KERN_ERR "ashmem: failed to unregister misc device!\n"); + + kmem_cache_destroy(ashmem_range_cachep); + kmem_cache_destroy(ashmem_area_cachep); + + printk(KERN_INFO "ashmem: unloaded\n"); +} + +module_init(ashmem_init); +module_exit(ashmem_exit); + +MODULE_LICENSE("GPL"); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a8cfa9cc6e86..d0e56c880154 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -166,6 +166,7 @@ static char * const zone_names[MAX_NR_ZONES] = { }; int min_free_kbytes = 1024; +int min_free_order_shift = 1; static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; @@ -1477,7 +1478,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, free_pages -= z->free_area[o].nr_free << o; /* Require fewer higher order pages to be free */ - min >>= 1; + min >>= min_free_order_shift; if (free_pages <= min) return 0; @@ -3159,6 +3160,20 @@ static inline unsigned long wait_table_bits(unsigned long size) #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) /* + * Check if a pageblock contains reserved pages + */ +static int pageblock_is_reserved(unsigned long start_pfn) +{ + unsigned long end_pfn = start_pfn + pageblock_nr_pages; + unsigned long pfn; + + for (pfn = start_pfn; pfn < end_pfn; pfn++) + if (PageReserved(pfn_to_page(pfn))) + return 1; + return 0; +} + +/* * Mark a number of pageblocks as MIGRATE_RESERVE. The number * of blocks reserved is based on min_wmark_pages(zone). The memory within * the reserve will tend to store contiguous free pages. Setting min_free_kbytes @@ -3197,7 +3212,7 @@ static void setup_zone_migrate_reserve(struct zone *zone) continue; /* Blocks with reserved pages will never free, skip them. */ - if (PageReserved(page)) + if (pageblock_is_reserved(pfn)) continue; block_migratetype = get_pageblock_migratetype(page); diff --git a/mm/shmem.c b/mm/shmem.c index 080b09a57a8f..b331553ea523 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2766,6 +2766,14 @@ put_memory: } EXPORT_SYMBOL_GPL(shmem_file_setup); +void shmem_set_file(struct vm_area_struct *vma, struct file *file) +{ + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = file; + vma->vm_ops = &shmem_vm_ops; +} + /** * shmem_zero_setup - setup a shared anonymous mapping * @vma: the vma to be mmapped is prepared by do_mmap_pgoff @@ -2778,10 +2786,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) file = shmem_file_setup("dev/zero", size, vma->vm_flags); if (IS_ERR(file)) return PTR_ERR(file); - - if (vma->vm_file) - fput(vma->vm_file); - vma->vm_file = file; - vma->vm_ops = &shmem_vm_ops; + shmem_set_file(vma, file); return 0; } diff --git a/net/Kconfig b/net/Kconfig index e926884c1675..2094c144d05d 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -79,6 +79,20 @@ source "net/netlabel/Kconfig" endif # if INET +config ANDROID_PARANOID_NETWORK + bool "Only allow certain groups to create sockets" + default y + help + none + +config NET_ACTIVITY_STATS + bool "Network activity statistics tracking" + default y + help + Network activity statistics are useful for tracking wireless + modem activity on 2G, 3G, 4G wireless networks. Counts number of + transmissions and groups them in specified time buckets. 
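The ashmem region above is driven entirely from user space through ioctls on /dev/ashmem. As an illustration only (not part of the patch), a minimal user-space sequence exercising the pin/unpin path might look like the sketch below; it assumes the ASHMEM_* ioctl numbers, ASHMEM_NAME_LEN and struct ashmem_pin that this patch adds in <linux/ashmem.h>, and most error handling is omitted.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>	/* added elsewhere in this patch */

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	size_t size = 4 * pgsz;
	char name[ASHMEM_NAME_LEN] = "example-region";
	struct ashmem_pin pin = { .offset = 0, .len = pgsz };
	int fd;
	void *p;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0)
		return 1;

	ioctl(fd, ASHMEM_SET_NAME, name);	/* only allowed before mmap() */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* required before mmap() */

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xab, size);

	/* Mark the first page discardable under memory pressure. */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* Re-pin before use; the return value reports whether it was purged. */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("first page was reclaimed while unpinned\n");

	munmap(p, size);
	close(fd);
	return 0;
}

This is the contract the shrinker relies on: only ranges the owner has explicitly unpinned ever reach vmtruncate_range(), so pinned memory is never discarded behind the application's back.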
+ config NETWORK_SECMARK bool "Security Marking" help @@ -216,7 +230,7 @@ source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" config RPS - boolean + boolean "RPS" depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS default y diff --git a/net/Makefile b/net/Makefile index ea60fbce9b1b..edb30692a11e 100644 --- a/net/Makefile +++ b/net/Makefile @@ -68,3 +68,4 @@ obj-$(CONFIG_SYSCTL) += sysctl_net.o endif obj-$(CONFIG_WIMAX) += wimax/ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ +obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o diff --git a/net/activity_stats.c b/net/activity_stats.c new file mode 100644 index 000000000000..8a3e93470069 --- /dev/null +++ b/net/activity_stats.c @@ -0,0 +1,115 @@ +/* net/activity_stats.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Mike Chan (mike@android.com) + */ + +#include <linux/proc_fs.h> +#include <linux/suspend.h> +#include <net/net_namespace.h> + +/* + * Track transmission rates in buckets (power of 2). + * 1,2,4,8...512 seconds. + * + * Buckets represent the count of network transmissions at least + * N seconds apart, where N is 1 << bucket index. + */ +#define BUCKET_MAX 10 + +/* Track network activity frequency */ +static unsigned long activity_stats[BUCKET_MAX]; +static ktime_t last_transmit; +static ktime_t suspend_time; +static DEFINE_SPINLOCK(activity_lock); + +void activity_stats_update(void) +{ + int i; + unsigned long flags; + ktime_t now; + s64 delta; + + spin_lock_irqsave(&activity_lock, flags); + now = ktime_get(); + delta = ktime_to_ns(ktime_sub(now, last_transmit)); + + for (i = BUCKET_MAX - 1; i >= 0; i--) { + /* + * Check if the time delta between network activity is within the + * minimum bucket range. 
+ */ + if (delta < (1000000000ULL << i)) + continue; + + activity_stats[i]++; + last_transmit = now; + break; + } + spin_unlock_irqrestore(&activity_lock, flags); +} + +static int activity_stats_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int i; + int len; + char *p = page; + + /* Only print if offset is 0, or we have enough buffer space */ + if (off || count < (30 * BUCKET_MAX + 22)) + return -ENOMEM; + + len = snprintf(p, count, "Min Bucket(sec) Count\n"); + count -= len; + p += len; + + for (i = 0; i < BUCKET_MAX; i++) { + len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]); + count -= len; + p += len; + } + *eof = 1; + + return p - page; +} + +static int activity_stats_notifier(struct notifier_block *nb, + unsigned long event, void *dummy) +{ + switch (event) { + case PM_SUSPEND_PREPARE: + suspend_time = ktime_get_real(); + break; + + case PM_POST_SUSPEND: + suspend_time = ktime_sub(ktime_get_real(), suspend_time); + last_transmit = ktime_sub(last_transmit, suspend_time); + } + + return 0; +} + +static struct notifier_block activity_stats_notifier_block = { + .notifier_call = activity_stats_notifier, +}; + +static int __init activity_stats_init(void) +{ + create_proc_read_entry("activity", S_IRUGO, + init_net.proc_net_stat, activity_stats_read_proc, NULL); + return register_pm_notifier(&activity_stats_notifier_block); +} + +subsys_initcall(activity_stats_init); + diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 421c45bd1b95..9c60673842c1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -40,6 +40,15 @@ #include <net/bluetooth/bluetooth.h> +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> +#endif + +#ifndef CONFIG_BT_SOCK_DEBUG +#undef BT_DBG +#define BT_DBG(D...) 
+#endif + #define VERSION "2.15" /* Bluetooth sockets */ @@ -125,11 +134,40 @@ int bt_sock_unregister(int proto) } EXPORT_SYMBOL(bt_sock_unregister); +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +static inline int current_has_bt_admin(void) +{ + return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN)); +} + +static inline int current_has_bt(void) +{ + return (current_has_bt_admin() || in_egroup_p(AID_NET_BT)); +} +# else +static inline int current_has_bt_admin(void) +{ + return 1; +} + +static inline int current_has_bt(void) +{ + return 1; +} +#endif + static int bt_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err; + if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO || + proto == BTPROTO_L2CAP) { + if (!current_has_bt()) + return -EPERM; + } else if (!current_has_bt_admin()) + return -EPERM; + if (net != &init_net) return -EAFNOSUPPORT; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 0b1e460fe440..1c8c30d52b53 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -217,7 +217,8 @@ static void hci_conn_idle(unsigned long arg) hci_conn_enter_sniff_mode(conn); } -struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) +struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, + __u16 pkt_type, bdaddr_t *dst) { struct hci_conn *conn; @@ -242,14 +243,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; break; case SCO_LINK: - if (lmp_esco_capable(hdev)) - conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | - (hdev->esco_type & EDR_ESCO_MASK); - else - conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; - break; + if (!pkt_type) + pkt_type = SCO_ESCO_MASK; case ESCO_LINK: - conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; + if (!pkt_type) + pkt_type = ALL_ESCO_MASK; + if (lmp_esco_capable(hdev)) { + /* HCI Setup Synchronous Connection Command uses + reverse logic on the EDR_ESCO_MASK bits */ + conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) & + hdev->esco_type; + } else { + /* Legacy HCI Add Sco Connection Command uses a + shifted bitmask */ + conn->pkt_type = (pkt_type << 5) & hdev->pkt_type & + SCO_PTYPE_MASK; + } break; } @@ -361,7 +370,9 @@ EXPORT_SYMBOL(hci_get_route); /* Create SCO or ACL connection. 
* Device _must_ be locked */ -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) +struct hci_conn *hci_connect(struct hci_dev *hdev, int type, + __u16 pkt_type, bdaddr_t *dst, + __u8 sec_level, __u8 auth_type) { struct hci_conn *acl; struct hci_conn *sco; @@ -369,7 +380,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 BT_DBG("%s dst %s", hdev->name, batostr(dst)); if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) { - if (!(acl = hci_conn_add(hdev, ACL_LINK, dst))) + if (!(acl = hci_conn_add(hdev, ACL_LINK, 0, dst))) return NULL; } @@ -390,7 +401,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 return acl; if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) { - if (!(sco = hci_conn_add(hdev, type, dst))) { + if (!(sco = hci_conn_add(hdev, type, pkt_type, dst))) { hci_conn_put(acl); return NULL; } @@ -529,7 +540,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn) if (test_bit(HCI_RAW, &hdev->flags)) return; - if (conn->mode != HCI_CM_SNIFF || !conn->power_save) + if (conn->mode != HCI_CM_SNIFF /* || !conn->power_save */) goto timer; if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { @@ -668,6 +679,15 @@ int hci_get_conn_list(void __user *arg) (ci + n)->out = c->out; (ci + n)->state = c->state; (ci + n)->link_mode = c->link_mode; + if (c->type == SCO_LINK) { + (ci + n)->mtu = hdev->sco_mtu; + (ci + n)->cnt = hdev->sco_cnt; + (ci + n)->pkts = hdev->sco_pkts; + } else { + (ci + n)->mtu = hdev->acl_mtu; + (ci + n)->cnt = hdev->acl_cnt; + (ci + n)->pkts = hdev->acl_pkts; + } if (++n >= req.conn_num) break; } @@ -704,6 +724,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) ci.out = conn->out; ci.state = conn->state; ci.link_mode = conn->link_mode; + if (req.type == SCO_LINK) { + ci.mtu = hdev->sco_mtu; + ci.cnt = hdev->sco_cnt; + ci.pkts = hdev->sco_pkts; + } else { + ci.mtu = hdev->acl_mtu; + ci.cnt = hdev->acl_cnt; + ci.pkts = hdev->acl_pkts; + } } hci_dev_unlock_bh(hdev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c52f091ee6de..a49aa2bf97f3 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1367,7 +1367,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); + hci_add_acl_hdr(skb, conn->handle, flags); if (!(list = skb_shinfo(skb)->frag_list)) { /* Non fragmented */ @@ -1384,12 +1384,14 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) spin_lock_bh(&conn->data_q.lock); __skb_queue_tail(&conn->data_q, skb); + flags &= ~ACL_PB_MASK; + flags |= ACL_CONT; do { skb = list; list = list->next; skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); + hci_add_acl_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index bfef5bae0b3a..bbb444175c81 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -579,7 +579,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) } } else { if (!conn) { - conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); + conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr); if (conn) { conn->out = 1; conn->link_mode |= HCI_LM_MASTER; @@ -960,7 +960,9 @@ static inline void 
hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { - if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { + /* pkt_type not yet used for incoming connections */ + if (!(conn = hci_conn_add(hdev, ev->link_type, 0, + &ev->bdaddr))) { BT_ERR("No memory for new connection"); hci_dev_unlock(hdev); return; @@ -1699,6 +1701,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu hci_conn_add_sysfs(conn); break; + case 0x10: /* Connection Accept Timeout */ case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index fadf26b4ed7c..72337ad07eca 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -354,13 +354,19 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn) static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); + u8 flags; BT_DBG("code 0x%2.2x", code); if (!skb) return; - hci_send_acl(conn->hcon, skb, 0); + if (lmp_no_flush_capable(conn->hcon->hdev)) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + hci_send_acl(conn->hcon, skb, flags); } static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) @@ -564,7 +570,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn) struct sock *parent = bt_sk(sk)->parent; rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); - parent->sk_data_ready(parent, 0); + if (parent) + parent->sk_data_ready(parent, 0); } else { sk->sk_state = BT_CONFIG; @@ -900,6 +907,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) pi->sec_level = l2cap_pi(parent)->sec_level; pi->role_switch = l2cap_pi(parent)->role_switch; pi->force_reliable = l2cap_pi(parent)->force_reliable; + pi->flushable = l2cap_pi(parent)->flushable; } else { pi->imtu = L2CAP_DEFAULT_MTU; pi->omtu = 0; @@ -915,6 +923,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; pi->force_reliable = 0; + pi->flushable = 0; } /* Default config options */ @@ -1092,7 +1101,7 @@ static int l2cap_do_connect(struct sock *sk) } } - hcon = hci_connect(hdev, ACL_LINK, dst, + hcon = hci_connect(hdev, ACL_LINK, 0, dst, l2cap_pi(sk)->sec_level, auth_type); if (!hcon) goto done; @@ -1433,10 +1442,17 @@ static void l2cap_drop_acked_frames(struct sock *sk) static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) { struct l2cap_pinfo *pi = l2cap_pi(sk); + struct hci_conn *hcon = pi->conn->hcon; + u16 flags; BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); - hci_send_acl(pi->conn->hcon, skb, 0); + if (lmp_no_flush_capable(hcon->hdev) && !l2cap_pi(sk)->flushable) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + hci_send_acl(hcon, skb, flags); } static void l2cap_streaming_send(struct sock *sk) @@ -2016,6 +2032,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); + l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE); break; default: @@ -2146,6 +2163,9 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us if (l2cap_pi(sk)->force_reliable) opt |= L2CAP_LM_RELIABLE; + if 
(l2cap_pi(sk)->flushable) + opt |= L2CAP_LM_FLUSHABLE; + if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; @@ -4645,7 +4665,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); - if (flags & ACL_START) { + if (!(flags & ACL_CONT)) { struct l2cap_hdr *hdr; int len; diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 7dca91bb8c57..ecda6d52d4d4 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -248,32 +248,6 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d) auth_type); } -static void rfcomm_session_timeout(unsigned long arg) -{ - struct rfcomm_session *s = (void *) arg; - - BT_DBG("session %p state %ld", s, s->state); - - set_bit(RFCOMM_TIMED_OUT, &s->flags); - rfcomm_schedule(RFCOMM_SCHED_TIMEO); -} - -static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) -{ - BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); - - if (!mod_timer(&s->timer, jiffies + timeout)) - rfcomm_session_hold(s); -} - -static void rfcomm_session_clear_timer(struct rfcomm_session *s) -{ - BT_DBG("session %p state %ld", s, s->state); - - if (timer_pending(&s->timer) && del_timer(&s->timer)) - rfcomm_session_put(s); -} - /* ---- RFCOMM DLCs ---- */ static void rfcomm_dlc_timeout(unsigned long arg) { @@ -350,7 +324,6 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) rfcomm_session_hold(s); - rfcomm_session_clear_timer(s); rfcomm_dlc_hold(d); list_add(&d->list, &s->dlcs); d->session = s; @@ -366,9 +339,6 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) d->session = NULL; rfcomm_dlc_put(d); - if (list_empty(&s->dlcs)) - rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); - rfcomm_session_put(s); } @@ -462,7 +432,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) switch (d->state) { case BT_CONNECT: - case BT_CONFIG: if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { set_bit(RFCOMM_AUTH_REJECT, &d->flags); rfcomm_schedule(RFCOMM_SCHED_AUTH); @@ -482,7 +451,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) break; case BT_OPEN: - case BT_CONNECT2: if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { set_bit(RFCOMM_AUTH_REJECT, &d->flags); rfcomm_schedule(RFCOMM_SCHED_AUTH); @@ -601,8 +569,6 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) BT_DBG("session %p sock %p", s, sock); - setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s); - INIT_LIST_HEAD(&s->dlcs); s->state = state; s->sock = sock; @@ -634,7 +600,6 @@ static void rfcomm_session_del(struct rfcomm_session *s) if (state == BT_CONNECTED) rfcomm_send_disc(s, 0); - rfcomm_session_clear_timer(s); sock_release(s->sock); kfree(s); @@ -676,7 +641,6 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err) __rfcomm_dlc_close(d, err); } - rfcomm_session_clear_timer(s); rfcomm_session_put(s); } @@ -1238,8 +1202,6 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d) rfcomm_send_ua(d->session, d->dlci); - rfcomm_dlc_clear_timer(d); - rfcomm_dlc_lock(d); d->state = BT_CONNECTED; d->state_change(d, 0); @@ -1257,11 +1219,6 @@ static void rfcomm_check_accept(struct rfcomm_dlc *d) if (d->defer_setup) { set_bit(RFCOMM_DEFER_SETUP, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); - - rfcomm_dlc_lock(d); - d->state = BT_CONNECT2; - d->state_change(d, 0); - rfcomm_dlc_unlock(d); } else rfcomm_dlc_accept(d); } else { @@ -1803,11 +1760,6 @@ static 
inline void rfcomm_process_dlcs(struct rfcomm_session *s) if (d->defer_setup) { set_bit(RFCOMM_DEFER_SETUP, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); - - rfcomm_dlc_lock(d); - d->state = BT_CONNECT2; - d->state_change(d, 0); - rfcomm_dlc_unlock(d); } else rfcomm_dlc_accept(d); } @@ -1923,13 +1875,6 @@ static inline void rfcomm_process_sessions(void) struct rfcomm_session *s; s = list_entry(p, struct rfcomm_session, list); - if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { - s->state = BT_DISCONN; - rfcomm_send_disc(s, 0); - rfcomm_session_put(s); - continue; - } - if (s->state == BT_LISTEN) { rfcomm_accept_connection(s); continue; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index d0927d1fdada..cb21a785f9de 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -178,6 +178,7 @@ static int sco_connect(struct sock *sk) { bdaddr_t *src = &bt_sk(sk)->src; bdaddr_t *dst = &bt_sk(sk)->dst; + __u16 pkt_type = sco_pi(sk)->pkt_type; struct sco_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; @@ -194,10 +195,13 @@ static int sco_connect(struct sock *sk) if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; - else + else { type = SCO_LINK; + pkt_type &= SCO_ESCO_MASK; + } - hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + hcon = hci_connect(hdev, type, pkt_type, dst, + BT_SECURITY_LOW, HCI_AT_NO_BONDING); if (!hcon) goto done; @@ -450,18 +454,22 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol, return 0; } -static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { - struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sockaddr_sco sa; struct sock *sk = sock->sk; - bdaddr_t *src = &sa->sco_bdaddr; - int err = 0; + bdaddr_t *src = &sa.sco_bdaddr; + int len, err = 0; - BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); + BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr)); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), alen); + memcpy(&sa, addr, len); + lock_sock(sk); if (sk->sk_state != BT_OPEN) { @@ -475,7 +483,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le err = -EADDRINUSE; } else { /* Save source address */ - bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); + bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr); + sco_pi(sk)->pkt_type = sa.sco_pkt_type; sk->sk_state = BT_BOUND; } @@ -488,27 +497,34 @@ done: static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { - struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; - int err = 0; - + struct sockaddr_sco sa; + int len, err = 0; BT_DBG("sk %p", sk); - if (alen < sizeof(struct sockaddr_sco) || - addr->sa_family != AF_BLUETOOTH) + if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; - if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) - return -EBADFD; - - if (sk->sk_type != SOCK_SEQPACKET) - return -EINVAL; + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), alen); + memcpy(&sa, addr, len); lock_sock(sk); + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + /* Set destination address and psm */ - bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); + bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr); + sco_pi(sk)->pkt_type = 
sa.sco_pkt_type; if ((err = sco_connect(sk))) goto done; @@ -614,6 +630,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); else bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); + sa->sco_pkt_type = sco_pi(sk)->pkt_type; return 0; } diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 80ff87ce43aa..6a13dce23c4c 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \ inet_fragment.o obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o +obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o obj-$(CONFIG_PROC_FS) += proc.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 6a1100c25a9f..cefdfddc898a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -117,6 +117,19 @@ #include <linux/mroute.h> #endif +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} +#else +static inline int current_has_network(void) +{ + return 1; +} +#endif /* The inetsw table contains everything that inet_create needs to * build a new socket. @@ -259,6 +272,7 @@ static inline int inet_netns_ok(struct net *net, int protocol) return ipprot->netns_ok; } + /* * Create an inet socket. */ @@ -275,6 +289,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (!current_has_network()) + return -EACCES; + if (unlikely(!inet_ehash_secret)) if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) build_ehash_secret(); @@ -869,6 +886,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCSIFFLAGS: + case SIOCKILLADDR: err = devinet_ioctl(net, cmd, (void __user *)arg); break; default: diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index da14c49284f4..8b7dfaf3b6ea 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -58,6 +58,7 @@ #include <net/arp.h> #include <net/ip.h> +#include <net/tcp.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/rtnetlink.h> @@ -635,6 +636,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ + case SIOCKILLADDR: /* Nuke all sockets on this address */ ret = -EACCES; if (!capable(CAP_NET_ADMIN)) goto out; @@ -686,7 +688,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) } ret = -EADDRNOTAVAIL; - if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) + if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS + && cmd != SIOCKILLADDR) goto done; switch (cmd) { @@ -811,6 +814,10 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) inet_insert_ifa(ifa); } break; + case SIOCKILLADDR: /* Nuke all connections on this address */ + ret = 0; + tcp_v4_nuke_addr(sin->sin_addr.s_addr); + break; } done: rtnl_unlock(); diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c new file mode 100644 index 000000000000..0cbbf10026a6 --- /dev/null +++ b/net/ipv4/sysfs_net_ipv4.c @@ -0,0 +1,88 @@ +/* + * net/ipv4/sysfs_net_ipv4.c + * + * sysfs-based networking knobs (so we can, unlike with sysctl, control perms) + * + * Copyright (C) 2008 Google, Inc. 
+ * + * Robert Love <rlove@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/init.h> +#include <net/tcp.h> + +#define CREATE_IPV4_FILE(_name, _var) \ +static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%d\n", _var); \ +} \ +static ssize_t _name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val, ret; \ + ret = sscanf(buf, "%d", &val); \ + if (ret != 1) \ + return -EINVAL; \ + if (val < 0) \ + return -EINVAL; \ + _var = val; \ + return count; \ +} \ +static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]); +CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]); +CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]); + +CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]); +CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]); +CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]); + +static struct attribute *ipv4_attrs[] = { + &tcp_wmem_min_attr.attr, + &tcp_wmem_def_attr.attr, + &tcp_wmem_max_attr.attr, + &tcp_rmem_min_attr.attr, + &tcp_rmem_def_attr.attr, + &tcp_rmem_max_attr.attr, + NULL +}; + +static struct attribute_group ipv4_attr_group = { + .attrs = ipv4_attrs, +}; + +static __init int sysfs_ipv4_init(void) +{ + struct kobject *ipv4_kobject; + int ret; + + ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj); + if (!ipv4_kobject) + return -ENOMEM; + + ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group); + if (ret) { + kobject_put(ipv4_kobject); + return ret; + } + + return 0; +} + +subsys_initcall(sysfs_ipv4_init); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f115ea68a4ef..501921e7506a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -266,6 +266,7 @@ #include <linux/crypto.h> #include <linux/time.h> #include <linux/slab.h> +#include <linux/uid_stat.h> #include <net/icmp.h> #include <net/tcp.h> @@ -1106,6 +1107,9 @@ out: tcp_push(sk, flags, mss_now, tp->nonagle); TCP_CHECK_TIMER(sk); release_sock(sk); + + if (copied > 0) + uid_stat_tcp_snd(current_uid(), copied); return copied; do_fault: @@ -1383,8 +1387,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_rcv_space_adjust(sk); /* Clean up data we have read: This will do ACK frames. 
*/ - if (copied > 0) + if (copied > 0) { tcp_cleanup_rbuf(sk, copied); + uid_stat_tcp_rcv(current_uid(), copied); + } + return copied; } EXPORT_SYMBOL(tcp_read_sock); @@ -1771,6 +1778,9 @@ skip_copy: TCP_CHECK_TIMER(sk); release_sock(sk); + + if (copied > 0) + uid_stat_tcp_rcv(current_uid(), copied); return copied; out: @@ -1780,6 +1790,8 @@ out: recv_urg: err = tcp_recv_urg(sk, msg, len, flags); + if (err > 0) + uid_stat_tcp_rcv(current_uid(), err); goto out; } EXPORT_SYMBOL(tcp_recvmsg); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 020766292bb0..c3306fe20f8f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1969,6 +1969,49 @@ void tcp_v4_destroy_sock(struct sock *sk) } EXPORT_SYMBOL(tcp_v4_destroy_sock); +/* + * tcp_v4_nuke_addr - destroy all sockets on the given local address + */ +void tcp_v4_nuke_addr(__u32 saddr) +{ + unsigned int bucket; + + for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) { + struct hlist_nulls_node *node; + struct sock *sk; + spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket); + +restart: + spin_lock_bh(lock); + sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) { + struct inet_sock *inet = inet_sk(sk); + + if (inet->inet_rcv_saddr != saddr) + continue; + if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT) + continue; + if (sock_flag(sk, SOCK_DEAD)) + continue; + + sock_hold(sk); + spin_unlock_bh(lock); + + local_bh_disable(); + bh_lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_error_report(sk); + + tcp_done(sk); + bh_unlock_sock(sk); + local_bh_enable(); + sock_put(sk); + + goto restart; + } + spin_unlock_bh(lock); + } +} + #ifdef CONFIG_PROC_FS /* Proc filesystem TCP sock list dumping. */ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 56b9bf2516f4..2f421a4beca7 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -63,6 +63,20 @@ #include <asm/system.h> #include <linux/mroute6.h> +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} +#else +static inline int current_has_network(void) +{ + return 1; +} +#endif + MODULE_AUTHOR("Cast of dozens"); MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); MODULE_LICENSE("GPL"); @@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (!current_has_network()) + return -EACCES; + if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM && !inet_ehash_secret) diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 1ae697681bc7..4caca4af1e76 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -284,7 +284,7 @@ static __net_init int l2tp_eth_init_net(struct net *net) return 0; } -static __net_initdata struct pernet_operations l2tp_eth_net_ops = { +static struct pernet_operations l2tp_eth_net_ops = { .init = l2tp_eth_init_net, .id = &l2tp_eth_net_id, .size = sizeof(struct l2tp_eth_net), diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index eaf765876458..51f4baeede85 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -10,6 +10,11 @@ menuconfig RFKILL To compile this driver as a module, choose M here: the module will be called rfkill. 
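The SIOCKILLADDR ioctl wired through devinet_ioctl() above ends in tcp_v4_nuke_addr(), which marks every TCP socket bound to the given local address with sk_err = ETIMEDOUT and tears it down, so stale connections fail immediately instead of waiting out TCP timeouts when an interface loses its address. A hedged user-space sketch of the call (not part of the patch): the interface name and address are hypothetical, the SIOCKILLADDR constant comes from this patch's sockios header change, and CAP_NET_ADMIN is required.

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/sockios.h>	/* SIOCKILLADDR is added by this patch */

int main(void)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ - 1);	/* hypothetical interface */
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "10.0.0.2", &sin->sin_addr);	/* address being torn down */

	/* Resets every TCP socket whose local address is 10.0.0.2. */
	if (ioctl(fd, SIOCKILLADDR, &ifr) < 0)
		perror("SIOCKILLADDR");

	close(fd);
	return 0;
}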
+config RFKILL_PM + bool "Power off on suspend" + depends on RFKILL && PM + default y + # LED trigger support config RFKILL_LEDS bool diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 51875a0c5d48..56b50e7db72d 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -783,6 +783,7 @@ void rfkill_pause_polling(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_pause_polling); +#ifdef CONFIG_RFKILL_PM void rfkill_resume_polling(struct rfkill *rfkill) { BUG_ON(!rfkill); @@ -817,14 +818,17 @@ static int rfkill_resume(struct device *dev) return 0; } +#endif static struct class rfkill_class = { .name = "rfkill", .dev_release = rfkill_release, .dev_attrs = rfkill_dev_attrs, .dev_uevent = rfkill_dev_uevent, +#ifdef CONFIG_RFKILL_PM .suspend = rfkill_suspend, .resume = rfkill_resume, +#endif }; bool rfkill_blocked(struct rfkill *rfkill) diff --git a/net/socket.c b/net/socket.c index 2270b941bcc7..af60b48773ba 100644 --- a/net/socket.c +++ b/net/socket.c @@ -564,7 +564,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, if (err) return err; - return sock->ops->sendmsg(iocb, sock, msg, size); + err = sock->ops->sendmsg(iocb, sock, msg, size); + return err; } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) @@ -680,6 +681,7 @@ EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { + int err; struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); @@ -690,7 +692,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, si->size = size; si->flags = flags; - return sock->ops->recvmsg(iocb, sock, msg, size, flags); + err = sock->ops->recvmsg(iocb, sock, msg, size, flags); + return err; } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, diff --git a/security/commoncap.c b/security/commoncap.c index 9d172e6e330c..3238b2938893 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -29,6 +29,10 @@ #include <linux/securebits.h> #include <linux/syslog.h> +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> +#endif + /* * If a non-root user executes a setuid-root binary in * !secure(SECURE_NOROOT) mode, then we raise capabilities. @@ -83,6 +87,12 @@ EXPORT_SYMBOL(cap_netlink_recv); int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit) { +#ifdef CONFIG_ANDROID_PARANOID_NETWORK + if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW)) + return 0; + if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN)) + return 0; +#endif return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM; } |