Diffstat (limited to 'drivers')
29 files changed, 232 insertions, 184 deletions
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 4d535c50d821..c8e3cba423ef 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -260,22 +260,24 @@ static int acpi_fan_add(struct acpi_device *device) result = PTR_ERR(cdev); goto end; } - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev.bus_id, cdev->id); - - acpi_driver_data(device) = cdev; - result = sysfs_create_link(&device->dev.kobj, - &cdev->device.kobj, - "thermal_cooling"); - if (result) - return result; - - result = sysfs_create_link(&cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) - return result; + if (cdev) { + printk(KERN_INFO PREFIX + "%s is registered as cooling_device%d\n", + device->dev.bus_id, cdev->id); + + acpi_driver_data(device) = cdev; + result = sysfs_create_link(&device->dev.kobj, + &cdev->device.kobj, + "thermal_cooling"); + if (result) + return result; + + result = sysfs_create_link(&cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) + return result; + } result = acpi_fan_add_fs(device); if (result) diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 3a136f6c66a3..36a68fa114e3 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -674,20 +674,22 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) result = PTR_ERR(pr->cdev); goto end; } - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev.bus_id, pr->cdev->id); - - result = sysfs_create_link(&device->dev.kobj, - &pr->cdev->device.kobj, - "thermal_cooling"); - if (result) - return result; - result = sysfs_create_link(&pr->cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) - return result; + if (pr->cdev) { + printk(KERN_INFO PREFIX + "%s is registered as cooling_device%d\n", + device->dev.bus_id, pr->cdev->id); + + result = sysfs_create_link(&device->dev.kobj, + &pr->cdev->device.kobj, + "thermal_cooling"); + if (result) + return result; + result = sysfs_create_link(&pr->cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) + return result; + } if (pr->flags.throttling) { printk(KERN_INFO PREFIX "%s [%s] (supports", diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index fe09b57de617..12fb44f16766 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -734,19 +734,21 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (IS_ERR(device->cdev)) return; - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev->dev.bus_id, device->cdev->id); - result = sysfs_create_link(&device->dev->dev.kobj, - &device->cdev->device.kobj, - "thermal_cooling"); - if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); - result = sysfs_create_link(&device->cdev->device.kobj, - &device->dev->dev.kobj, - "device"); - if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); + if (device->cdev) { + printk(KERN_INFO PREFIX + "%s is registered as cooling_device%d\n", + device->dev->dev.bus_id, device->cdev->id); + result = sysfs_create_link(&device->dev->dev.kobj, + &device->cdev->device.kobj, + "thermal_cooling"); + if (result) + printk(KERN_ERR PREFIX "Create sysfs link\n"); + result = sysfs_create_link(&device->cdev->device.kobj, + &device->dev->dev.kobj, + "device"); + if (result) + printk(KERN_ERR PREFIX "Create sysfs link\n"); + } } if (device->cap._DCS && device->cap._DSS){ static int count = 0; diff --git a/drivers/base/driver.c b/drivers/base/driver.c index bf31a0170a48..9a6537f14401 100644 
--- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -133,6 +133,7 @@ int driver_add_kobj(struct device_driver *drv, struct kobject *kobj, { va_list args; char *name; + int ret; va_start(args, fmt); name = kvasprintf(GFP_KERNEL, fmt, args); @@ -141,7 +142,9 @@ int driver_add_kobj(struct device_driver *drv, struct kobject *kobj, if (!name) return -ENOMEM; - return kobject_add(kobj, &drv->p->kobj, "%s", name); + ret = kobject_add(kobj, &drv->p->kobj, "%s", name); + kfree(name); + return ret; } EXPORT_SYMBOL_GPL(driver_add_kobj); diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c index e5a0e97cfdda..35d25d821c38 100644 --- a/drivers/char/drm/ati_pcigart.c +++ b/drivers/char/drm/ati_pcigart.c @@ -122,8 +122,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga } else { address = gart_info->addr; bus_address = gart_info->bus_addr; - DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", - bus_address, (unsigned long)address); + DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n", + (unsigned long long)bus_address, + (unsigned long)address); } pci_gart = (u32 *) address; diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 320f2b6ddee6..99f2f2a46bf7 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1745,7 +1745,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; - attrs.max_ird = ep->ord; + attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = IWCH_QP_STATE_RTS; diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c index d2ade7443b7d..798d84c44d03 100644 --- a/drivers/input/misc/ixp4xx-beeper.c +++ b/drivers/input/misc/ixp4xx-beeper.c @@ -25,6 +25,7 @@ MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); MODULE_DESCRIPTION("ixp4xx beeper driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ixp4xx-beeper"); static DEFINE_SPINLOCK(beep_lock); diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile index 5e8272d296d8..7d463c26124f 100644 --- a/drivers/lguest/Makefile +++ b/drivers/lguest/Makefile @@ -19,3 +19,11 @@ Beer: @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` +Puppy: + @clear + @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" + @sleep 2; clear; printf "\n\n Sit!\n\n"; sleep 1; clear + @printf " __ \n ()'\`; \n /\\|\` \n / | \n(/_)_|_ \n" + @sleep 2; clear; printf "\n\n Stand!\n\n"; sleep 1; clear + @printf " __ \n ()'\`; \n /\\|\` \n /._.= \n /| / \n(_\_)_ \n" + @sleep 2; clear; printf "\n\n Good puppy!\n\n"; sleep 1; clear diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index c632c08cbbdc..5eea4356d703 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -1,8 +1,6 @@ /*P:400 This contains run_guest() which actually calls into the Host<->Guest * Switcher and analyzes the return, such as determining if the Guest wants the - * Host to do something. This file also contains useful helper routines, and a - * couple of non-obvious setup and teardown pieces which were implemented after - * days of debugging pain. :*/ + * Host to do something. 
This file also contains useful helper routines. :*/ #include <linux/module.h> #include <linux/stringify.h> #include <linux/stddef.h> @@ -49,8 +47,8 @@ static __init int map_switcher(void) * easy. */ - /* We allocate an array of "struct page"s. map_vm_area() wants the - * pages in this form, rather than just an array of pointers. */ + /* We allocate an array of struct page pointers. map_vm_area() wants + * this, rather than just an array of pages. */ switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, GFP_KERNEL); if (!switcher_page) { @@ -172,7 +170,7 @@ void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) } } -/* This is the write (copy into guest) version. */ +/* This is the write (copy into Guest) version. */ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, unsigned bytes) { @@ -209,9 +207,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) if (cpu->break_out) return -EAGAIN; - /* Check if there are any interrupts which can be delivered - * now: if so, this sets up the hander to be executed when we - * next run the Guest. */ + /* Check if there are any interrupts which can be delivered now: + * if so, this sets up the hander to be executed when we next + * run the Guest. */ maybe_do_interrupt(cpu); /* All long-lived kernel loops need to check with this horrible @@ -246,8 +244,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) lguest_arch_handle_trap(cpu); } + /* Special case: Guest is 'dead' but wants a reboot. */ if (cpu->lg->dead == ERR_PTR(-ERESTART)) return -ERESTART; + /* The Guest is dead => "No such file or directory" */ return -ENOENT; } diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 0f2cb4fd7c69..54d66f05fefa 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -29,7 +29,7 @@ #include "lg.h" /*H:120 This is the core hypercall routine: where the Guest gets what it wants. - * Or gets killed. Or, in the case of LHCALL_CRASH, both. */ + * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. */ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) { switch (args->arg0) { @@ -190,6 +190,13 @@ static void initialize(struct lg_cpu *cpu) * pagetable. */ guest_pagetable_clear_all(cpu); } +/*:*/ + +/*M:013 If a Guest reads from a page (so creates a mapping) that it has never + * written to, and then the Launcher writes to it (ie. the output of a virtual + * device), the Guest will still see the old page. In practice, this never + * happens: why would the Guest read a page which it has never written to? But + * a similar scenario might one day bite us, so it's worth mentioning. :*/ /*H:100 * Hypercalls @@ -227,7 +234,7 @@ void do_hypercalls(struct lg_cpu *cpu) * However, if we are signalled or the Guest sends I/O to the * Launcher, the run_guest() loop will exit without running the * Guest. When it comes back it would try to re-run the - * hypercall. */ + * hypercall. Finding that bug sucked. */ cpu->hcall = NULL; } } diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 32e97c1858e5..0414ddf87587 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -144,7 +144,6 @@ void maybe_do_interrupt(struct lg_cpu *cpu) if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, sizeof(blk))) return; - bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); /* Find the first interrupt. 
*/ @@ -237,9 +236,9 @@ void free_interrupts(void) clear_bit(syscall_vector, used_vectors); } -/*H:220 Now we've got the routines to deliver interrupts, delivering traps - * like page fault is easy. The only trick is that Intel decided that some - * traps should have error codes: */ +/*H:220 Now we've got the routines to deliver interrupts, delivering traps like + * page fault is easy. The only trick is that Intel decided that some traps + * should have error codes: */ static int has_err(unsigned int trap) { return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 1b2ec0bf5eb1..2bc9bf7e88e5 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -1,10 +1,10 @@ /*P:050 Lguest guests use a very simple method to describe devices. It's a - * series of device descriptors contained just above the top of normal + * series of device descriptors contained just above the top of normal Guest * memory. * * We use the standard "virtio" device infrastructure, which provides us with a * console, a network and a block driver. Each one expects some configuration - * information and a "virtqueue" mechanism to send and receive data. :*/ + * information and a "virtqueue" or two to send and receive data. :*/ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/lguest_launcher.h> @@ -53,7 +53,7 @@ struct lguest_device { * Device configurations * * The configuration information for a device consists of one or more - * virtqueues, a feature bitmaks, and some configuration bytes. The + * virtqueues, a feature bitmap, and some configuration bytes. The * configuration bytes don't really matter to us: the Launcher sets them up, and * the driver will look at them during setup. * @@ -179,7 +179,7 @@ struct lguest_vq_info }; /* When the virtio_ring code wants to prod the Host, it calls us here and we - * make a hypercall. We hand the page number of the virtqueue so the Host + * make a hypercall. We hand the physical address of the virtqueue so the Host * knows which virtqueue we're talking about. */ static void lg_notify(struct virtqueue *vq) { @@ -199,7 +199,8 @@ static void lg_notify(struct virtqueue *vq) * allocate its own pages and tell the Host where they are, but for lguest it's * simpler for the Host to simply tell us where the pages are. * - * So we provide devices with a "find virtqueue and set it up" function. */ + * So we provide drivers with a "find the Nth virtqueue and set it up" + * function. */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq)) diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 2221485b0773..564e425d71dd 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -73,7 +73,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) if (current != cpu->tsk) return -EPERM; - /* If the guest is already dead, we indicate why */ + /* If the Guest is already dead, we indicate why */ if (lg->dead) { size_t len; @@ -88,7 +88,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) return len; } - /* If we returned from read() last time because the Guest notified, + /* If we returned from read() last time because the Guest sent I/O, * clear the flag. 
*/ if (cpu->pending_notify) cpu->pending_notify = 0; @@ -97,14 +97,20 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) return run_guest(cpu, (unsigned long __user *)user); } +/*L:025 This actually initializes a CPU. For the moment, a Guest is only + * uniprocessor, so "id" is always 0. */ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) { + /* We have a limited number the number of CPUs in the lguest struct. */ if (id >= NR_CPUS) return -EINVAL; + /* Set up this CPU's id, and pointer back to the lguest struct. */ cpu->id = id; cpu->lg = container_of((cpu - id), struct lguest, cpus[0]); cpu->lg->nr_cpus++; + + /* Each CPU has a timer it can set. */ init_clockdev(cpu); /* We need a complete page for the Guest registers: they are accessible @@ -120,11 +126,11 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) * address. */ lguest_arch_setup_regs(cpu, start_ip); - /* Initialize the queue for the waker to wait on */ + /* Initialize the queue for the Waker to wait on */ init_waitqueue_head(&cpu->break_wq); /* We keep a pointer to the Launcher task (ie. current task) for when - * other Guests want to wake this one (inter-Guest I/O). */ + * other Guests want to wake this one (eg. console input). */ cpu->tsk = current; /* We need to keep a pointer to the Launcher's memory map, because if @@ -136,6 +142,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) * when the same Guest runs on the same CPU twice. */ cpu->last_pages = NULL; + /* No error == success. */ return 0; } @@ -185,14 +192,13 @@ static int initialize(struct file *file, const unsigned long __user *input) lg->mem_base = (void __user *)(long)args[0]; lg->pfn_limit = args[1]; - /* This is the first cpu */ + /* This is the first cpu (cpu 0) and it will start booting at args[3] */ err = lg_cpu_start(&lg->cpus[0], 0, args[3]); if (err) goto release_guest; /* Initialize the Guest's shadow page tables, using the toplevel - * address the Launcher gave us. This allocates memory, so can - * fail. */ + * address the Launcher gave us. This allocates memory, so can fail. */ err = init_guest_pagetable(lg, args[2]); if (err) goto free_regs; @@ -218,11 +224,16 @@ unlock: /*L:010 The first operation the Launcher does must be a write. All writes * start with an unsigned long number: for the first write this must be * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use - * writes of other values to send interrupts. */ + * writes of other values to send interrupts. + * + * Note that we overload the "offset" in the /dev/lguest file to indicate what + * CPU number we're dealing with. Currently this is always 0, since we only + * support uniprocessor Guests, but you can see the beginnings of SMP support + * here. */ static ssize_t write(struct file *file, const char __user *in, size_t size, loff_t *off) { - /* Once the guest is initialized, we hold the "struct lguest" in the + /* Once the Guest is initialized, we hold the "struct lguest" in the * file private data. */ struct lguest *lg = file->private_data; const unsigned long __user *input = (const unsigned long __user *)in; @@ -230,6 +241,7 @@ static ssize_t write(struct file *file, const char __user *in, struct lg_cpu *uninitialized_var(cpu); unsigned int cpu_id = *off; + /* The first value tells us what this request is. 
*/ if (get_user(req, input) != 0) return -EFAULT; input++; diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a7f64a9d67e0..d93500f24fbb 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -2,8 +2,8 @@ * previous encounters. It's functional, and as neat as it can be in the * circumstances, but be wary, for these things are subtle and break easily. * The Guest provides a virtual to physical mapping, but we can neither trust - * it nor use it: we verify and convert it here to point the hardware to the - * actual Guest pages when running the Guest. :*/ + * it nor use it: we verify and convert it here then point the CPU to the + * converted Guest pages when running the Guest. :*/ /* Copyright (C) Rusty Russell IBM Corporation 2006. * GPL v2 and any later version */ @@ -106,6 +106,11 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t); } +/*:*/ + +/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We + * could probably try to grab batches of pages here as an optimization + * (ie. pre-faulting). :*/ /*H:350 This routine takes a page number given by the Guest and converts it to * an actual, physical page number. It can fail for several reasons: the @@ -113,8 +118,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) * and the page is read-only, or the write flag was set and the page was * shared so had to be copied, but we ran out of memory. * - * This holds a reference to the page, so release_pte() is careful to - * put that back. */ + * This holds a reference to the page, so release_pte() is careful to put that + * back. */ static unsigned long get_pfn(unsigned long virtpfn, int write) { struct page *page; @@ -532,13 +537,13 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, * all processes. So when the page table above that address changes, we update * all the page tables, not just the current one. This is rare. * - * The benefit is that when we have to track a new page table, we can copy keep - * all the kernel mappings. This speeds up context switch immensely. */ + * The benefit is that when we have to track a new page table, we can keep all + * the kernel mappings. This speeds up context switch immensely. */ void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, unsigned long vaddr, pte_t gpte) { - /* Kernel mappings must be changed on all top levels. Slow, but - * doesn't happen often. */ + /* Kernel mappings must be changed on all top levels. Slow, but doesn't + * happen often. */ if (vaddr >= cpu->lg->kernel_address) { unsigned int i; for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) @@ -704,12 +709,11 @@ static __init void populate_switcher_pte_page(unsigned int cpu, /* We've made it through the page table code. Perhaps our tired brains are * still processing the details, or perhaps we're simply glad it's over. * - * If nothing else, note that all this complexity in juggling shadow page - * tables in sync with the Guest's page tables is for one reason: for most - * Guests this page table dance determines how bad performance will be. This - * is why Xen uses exotic direct Guest pagetable manipulation, and why both - * Intel and AMD have implemented shadow page table support directly into - * hardware. 
+ * If nothing else, note that all this complexity in juggling shadow page tables + * in sync with the Guest's page tables is for one reason: for most Guests this + * page table dance determines how bad performance will be. This is why Xen + * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD + * have implemented shadow page table support directly into hardware. * * There is just one file remaining in the Host. */ diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 635187812d52..5126d5d9ea0e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -17,6 +17,13 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +/*P:450 This file contains the x86-specific lguest code. It used to be all + * mixed in with drivers/lguest/core.c but several foolhardy code slashers + * wrestled most of the dependencies out to here in preparation for porting + * lguest to other architectures (see what I mean by foolhardy?). + * + * This also contains a couple of non-obvious setup and teardown pieces which + * were implemented after days of debugging pain. :*/ #include <linux/kernel.h> #include <linux/start_kernel.h> #include <linux/string.h> @@ -157,6 +164,8 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages) * also simplify copy_in_guest_info(). Note that we'd still need to restore * things when we exit to Launcher userspace, but that's fairly easy. * + * We could also try using this hooks for PGE, but that might be too expensive. + * * The hooks were designed for KVM, but we can also put them to good use. :*/ /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts @@ -182,7 +191,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * was doing. */ run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); - /* Note that the "regs" pointer contains two extra entries which are + /* Note that the "regs" structure contains two extra entries which are * not really registers: a trap number which says what interrupt or * trap made the switcher code come back, and an error code which some * traps set. */ @@ -293,11 +302,10 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) break; case 14: /* We've intercepted a Page Fault. */ /* The Guest accessed a virtual address that wasn't mapped. - * This happens a lot: we don't actually set up most of the - * page tables for the Guest at all when we start: as it runs - * it asks for more and more, and we set them up as - * required. In this case, we don't even tell the Guest that - * the fault happened. + * This happens a lot: we don't actually set up most of the page + * tables for the Guest at all when we start: as it runs it asks + * for more and more, and we set them up as required. In this + * case, we don't even tell the Guest that the fault happened. * * The errcode tells whether this was a read or a write, and * whether kernel or userspace code. */ @@ -342,7 +350,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) if (!deliver_trap(cpu, cpu->regs->trapnum)) /* If the Guest doesn't have a handler (either it hasn't * registered any yet, or it's one of the faults we don't let - * it handle), it dies with a cryptic error message. */ + * it handle), it dies with this cryptic error message. */ kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", cpu->regs->trapnum, cpu->regs->eip, cpu->regs->trapnum == 14 ? 
cpu->arch.last_pagefault @@ -375,8 +383,8 @@ void __init lguest_arch_host_init(void) * The only exception is the interrupt handlers in switcher.S: their * addresses are placed in a table (default_idt_entries), so we need to * update the table with the new addresses. switcher_offset() is a - * convenience function which returns the distance between the builtin - * switcher code and the high-mapped copy we just made. */ + * convenience function which returns the distance between the + * compiled-in switcher code and the high-mapped copy we just made. */ for (i = 0; i < IDT_ENTRIES; i++) default_idt_entries[i] += switcher_offset(); @@ -416,7 +424,7 @@ void __init lguest_arch_host_init(void) state->guest_gdt_desc.address = (long)&state->guest_gdt; /* We know where we want the stack to be when the Guest enters - * the switcher: in pages->regs. The stack grows upwards, so + * the Switcher: in pages->regs. The stack grows upwards, so * we start it at the end of that structure. */ state->guest_tss.sp0 = (long)(&pages->regs + 1); /* And this is the GDT entry to use for the stack: we keep a @@ -513,8 +521,8 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) { u32 tsc_speed; - /* The pointer to the Guest's "struct lguest_data" is the only - * argument. We check that address now. */ + /* The pointer to the Guest's "struct lguest_data" is the only argument. + * We check that address now. */ if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, sizeof(*cpu->lg->lguest_data))) return -EFAULT; @@ -546,6 +554,7 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) return 0; } +/*:*/ /*L:030 lguest_arch_setup_regs() * diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 0af8baaa0d4a..3fc15318a80f 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S @@ -1,6 +1,6 @@ -/*P:900 This is the Switcher: code which sits at 0xFFC00000 to do the low-level - * Guest<->Host switch. It is as simple as it can be made, but it's naturally - * very specific to x86. +/*P:900 This is the Switcher: code which sits at 0xFFC00000 astride both the + * Host and Guest to do the low-level Guest<->Host switch. It is as simple as + * it can be made, but it's naturally very specific to x86. * * You have now completed Preparation. If this has whet your appetite; if you * are feeling invigorated and refreshed then the next, more challenging stage @@ -189,7 +189,7 @@ ENTRY(switch_to_guest) // Interrupts are turned back on: we are Guest. iret -// We treat two paths to switch back to the Host +// We tread two paths to switch back to the Host // Yet both must save Guest state and restore Host // So we put the routine in a macro. #define SWITCH_TO_HOST \ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index b04f98df94ea..835def11419d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2003 Christophe Saout <christophe@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> - * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -93,6 +93,8 @@ struct crypt_config { struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; + wait_queue_head_t writeq; + /* * crypto related data */ @@ -331,14 +333,7 @@ static void crypt_convert_init(struct crypt_config *cc, ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; ctx->sector = sector + cc->iv_offset; init_completion(&ctx->restart); - /* - * Crypto operation can be asynchronous, - * ctx->pending is increased after request submission. - * We need to ensure that we don't call the crypt finish - * operation before pending got incremented - * (dependent on crypt submission return code). - */ - atomic_set(&ctx->pending, 2); + atomic_set(&ctx->pending, 1); } static int crypt_convert_block(struct crypt_config *cc, @@ -411,43 +406,42 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - int r = 0; + int r; while(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt) { crypt_alloc_req(cc, ctx); + atomic_inc(&ctx->pending); + r = crypt_convert_block(cc, ctx, cc->req); switch (r) { + /* async */ case -EBUSY: wait_for_completion(&ctx->restart); INIT_COMPLETION(ctx->restart); /* fall through*/ case -EINPROGRESS: - atomic_inc(&ctx->pending); cc->req = NULL; - r = 0; - /* fall through*/ + ctx->sector++; + continue; + + /* sync */ case 0: + atomic_dec(&ctx->pending); ctx->sector++; continue; - } - break; + /* error */ + default: + atomic_dec(&ctx->pending); + return r; + } } - /* - * If there are pending crypto operation run async - * code. Otherwise process return code synchronously. - * The step of 2 ensures that async finish doesn't - * call crypto finish too early. - */ - if (atomic_sub_return(2, &ctx->pending)) - return -EINPROGRESS; - - return r; + return 0; } static void dm_crypt_bio_destructor(struct bio *bio) @@ -624,8 +618,10 @@ static void kcryptd_io_read(struct dm_crypt_io *io) static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; + struct crypt_config *cc = io->target->private; generic_make_request(clone); + wake_up(&cc->writeq); } static void kcryptd_io(struct work_struct *work) @@ -698,7 +694,8 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) r = crypt_convert(cc, &io->ctx); - if (r != -EINPROGRESS) { + if (atomic_dec_and_test(&io->ctx.pending)) { + /* processed, no running async crypto */ kcryptd_crypt_write_io_submit(io, r, 0); if (unlikely(r < 0)) return; @@ -706,8 +703,12 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) atomic_inc(&io->pending); /* out of memory -> run queues */ - if (unlikely(remaining)) + if (unlikely(remaining)) { + /* wait for async crypto then reinitialize pending */ + wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); + atomic_set(&io->ctx.pending, 1); congestion_wait(WRITE, HZ/100); + } } } @@ -746,7 +747,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) r = crypt_convert(cc, &io->ctx); - if (r != -EINPROGRESS) + if (atomic_dec_and_test(&io->ctx.pending)) kcryptd_crypt_read_done(io, r); crypt_dec_pending(io); @@ -1047,6 +1048,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_crypt_queue; } + init_waitqueue_head(&cc->writeq); ti->private = cc; return 0; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index b8e342fe7586..8f25f628ef16 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -114,7 +114,7 @@ static void dec_count(struct io *io, unsigned int region, int error) wake_up_process(io->sleeper); else { - int r = io->error; + unsigned long r = io->error; io_notify_fn fn = io->callback; void *context = io->context; diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 51605870f898..762cb086bb7f 100644 --- a/drivers/md/dm-raid1.c +++ 
b/drivers/md/dm-raid1.c @@ -753,7 +753,7 @@ out: * are in the no-sync state. We have to recover these by * recopying from the default mirror to all the others. *---------------------------------------------------------------*/ -static void recovery_complete(int read_err, unsigned int write_err, +static void recovery_complete(int read_err, unsigned long write_err, void *context) { struct region *reg = (struct region *)context; @@ -767,7 +767,7 @@ static void recovery_complete(int read_err, unsigned int write_err, } if (write_err) { - DMERR_LIMIT("Write error during recovery (error = 0x%x)", + DMERR_LIMIT("Write error during recovery (error = 0x%lx)", write_err); /* * Bits correspond to devices (excluding default mirror). diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ae24eab8cd81..4dc8a43c034b 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -804,7 +804,7 @@ static void commit_callback(void *context, int success) * Called when the copy I/O has finished. kcopyd actually runs * this code so don't block. */ -static void copy_callback(int read_err, unsigned int write_err, void *context) +static void copy_callback(int read_err, unsigned long write_err, void *context) { struct dm_snap_pending_exception *pe = context; struct dm_snapshot *s = pe->snap; diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index f3831f31223e..e76b52ade690 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c @@ -169,7 +169,7 @@ struct kcopyd_job { * Error state of the job. */ int read_err; - unsigned int write_err; + unsigned long write_err; /* * Either READ or WRITE @@ -293,7 +293,7 @@ static int run_complete_job(struct kcopyd_job *job) { void *context = job->context; int read_err = job->read_err; - unsigned int write_err = job->write_err; + unsigned long write_err = job->write_err; kcopyd_notify_fn fn = job->fn; struct kcopyd_client *kc = job->kc; @@ -396,7 +396,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) if (r < 0) { /* error this rogue job */ if (job->rw == WRITE) - job->write_err = (unsigned int) -1; + job->write_err = (unsigned long) -1L; else job->read_err = 1; push(&_complete_jobs, job); @@ -448,8 +448,8 @@ static void dispatch_job(struct kcopyd_job *job) } #define SUB_JOB_SIZE 128 -static void segment_complete(int read_err, - unsigned int write_err, void *context) +static void segment_complete(int read_err, unsigned long write_err, + void *context) { /* FIXME: tidy this function */ sector_t progress = 0; diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h index 4621ea055c0e..4845f2a0c676 100644 --- a/drivers/md/kcopyd.h +++ b/drivers/md/kcopyd.h @@ -32,8 +32,8 @@ void kcopyd_client_destroy(struct kcopyd_client *kc); * read_err is a boolean, * write_err is a bitset, with 1 bit for each destination region */ -typedef void (*kcopyd_notify_fn)(int read_err, - unsigned int write_err, void *context); +typedef void (*kcopyd_notify_fn)(int read_err, unsigned long write_err, + void *context); int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, unsigned int num_dests, struct io_region *dests, diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index eb150dfb637f..8577de4ebb0e 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -182,7 +182,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) struct tifm_dev *sock = host->dev; unsigned int length; unsigned int off; - unsigned int t_size, p_off, p_cnt; + unsigned int t_size, p_cnt; unsigned char 
*buf; struct page *pg; unsigned long flags = 0; @@ -198,6 +198,8 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) host->block_pos); while (length) { + unsigned int uninitialized_var(p_off); + if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), off >> PAGE_SHIFT); diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index f00e04efbe28..bc4649a17b9d 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -202,9 +202,8 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state int ret = 0; int i; - if (info) - for (i = 0; i < MAX_RESOURCES; i++) - ret |= info->mtd[i]->suspend(info->mtd[i]); + for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) + ret |= info->mtd[i]->suspend(info->mtd[i]); return ret; } @@ -214,9 +213,9 @@ static int physmap_flash_resume(struct platform_device *dev) struct physmap_flash_info *info = platform_get_drvdata(dev); int i; - if (info) - for (i = 0; i < MAX_RESOURCES; i++) - info->mtd[i]->resume(info->mtd[i]); + for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) + info->mtd[i]->resume(info->mtd[i]); + return 0; } @@ -225,8 +224,8 @@ static void physmap_flash_shutdown(struct platform_device *dev) struct physmap_flash_info *info = platform_get_drvdata(dev); int i; - for (i = 0; i < MAX_RESOURCES; i++) - if (info && info->mtd[i]->suspend(info->mtd[i]) == 0) + for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) + if (info->mtd[i]->suspend(info->mtd[i]) == 0) info->mtd[i]->resume(info->mtd[i]); } #else diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c index 9189ec8f243e..0f6ac250f434 100644 --- a/drivers/mtd/nand/rtc_from4.c +++ b/drivers/mtd/nand/rtc_from4.c @@ -460,7 +460,7 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, er_stat |= 1 << 1; kfree(buf); } - +out: rtn = status; if (er_stat == 0) { /* if ECC is available */ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e9a333d98552..e887aa45c9cd 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -951,6 +951,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e * accesses to the SMBus registers, with potentially bad effects. Thus you * should be very careful when adding new entries: if SMM is accessing the * Intel SMBus, this is a very good reason to leave it hidden. + * + * Likewise, many recent laptops use ACPI for thermal management. If the + * ACPI DSDT code accesses the SMBus, then Linux should not access it + * natively, and keeping the SMBus hidden is the right thing to do. If you + * are about to add an entry in the table below, please first disassemble + * the DSDT and double-check that there is no code accessing the SMBus. 
*/ static int asus_hides_smbus; @@ -1028,11 +1034,6 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) case 0x12bf: /* HP xw4100 */ asus_hides_smbus = 1; } - else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) - switch (dev->subsystem_device) { - case 0x099c: /* HP Compaq nx6110 */ - asus_hides_smbus = 1; - } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch(dev->subsystem_device) { diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 986a550c0439..eefba3d0e4b9 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c @@ -384,7 +384,7 @@ static int bfin_bf54x_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) * Other flags can be set, and are documented in * include/linux/mm.h */ - vma->vm_flags |= VM_MAYSHARE; + vma->vm_flags |= VM_MAYSHARE | VM_SHARED; return 0; } diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index a2bb2de9e020..135d6dd7e672 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c @@ -91,6 +91,7 @@ struct bfin_t350mcqbfb_info { int lq043_open_cnt; int irq; spinlock_t lock; /* lock */ + u32 pseudo_pal[16]; }; static int nocursor; @@ -182,13 +183,13 @@ static void bfin_t350mcqb_config_dma(struct bfin_t350mcqbfb_info *fbi) } -static int bfin_t350mcqb_request_ports(int action) -{ - u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, +static u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, 0}; +static int bfin_t350mcqb_request_ports(int action) +{ if (action) { if (peripheral_request_list(ppi0_req_8, DRIVER_NAME)) { printk(KERN_ERR "Requesting Peripherals faild\n"); @@ -301,7 +302,7 @@ static int bfin_t350mcqb_fb_mmap(struct fb_info *info, struct vm_area_struct *vm * Other flags can be set, and are documented in * include/linux/mm.h */ - vma->vm_flags |= VM_MAYSHARE; + vma->vm_flags |= VM_MAYSHARE | VM_SHARED; return 0; } @@ -520,16 +521,7 @@ static int __init bfin_t350mcqb_probe(struct platform_device *pdev) fbinfo->fbops = &bfin_t350mcqb_fb_ops; - fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); - if (!fbinfo->pseudo_palette) { - printk(KERN_ERR DRIVER_NAME - "Fail to allocate pseudo_palette\n"); - - ret = -ENOMEM; - goto out4; - } - - memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16); + fbinfo->pseudo_palette = &info->pseudo_pal; if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { @@ -537,7 +529,7 @@ static int __init bfin_t350mcqb_probe(struct platform_device *pdev) "Fail to allocate colormap (%d entries)\n", BFIN_LCD_NBR_PALETTE_ENTRIES); ret = -EFAULT; - goto out5; + goto out4; } if (bfin_t350mcqb_request_ports(1)) { @@ -552,11 +544,11 @@ static int __init bfin_t350mcqb_probe(struct platform_device *pdev) goto out7; } - if (request_irq(info->irq, (void *)bfin_t350mcqb_irq_error, IRQF_DISABLED, - "PPI ERROR", info) < 0) { + ret = request_irq(info->irq, bfin_t350mcqb_irq_error, IRQF_DISABLED, + "PPI ERROR", info); + if (ret < 0) { printk(KERN_ERR DRIVER_NAME ": unable to request PPI ERROR IRQ\n"); - ret = -EFAULT; goto out7; } @@ -584,8 +576,6 @@ out7: bfin_t350mcqb_request_ports(0); out6: fb_dealloc_cmap(&fbinfo->cmap); -out5: - kfree(fbinfo->pseudo_palette); out4: dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, info->dma_handle); @@ -605,6 +595,8 @@ static int bfin_t350mcqb_remove(struct platform_device 
*pdev) struct fb_info *fbinfo = platform_get_drvdata(pdev); struct bfin_t350mcqbfb_info *info = fbinfo->par; + unregister_framebuffer(fbinfo); + free_dma(CH_PPI); free_irq(info->irq, info); @@ -612,7 +604,6 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev) dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, info->dma_handle); - kfree(fbinfo->pseudo_palette); fb_dealloc_cmap(&fbinfo->cmap); #ifndef NO_BL_SUPPORT @@ -620,10 +611,11 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev) backlight_device_unregister(bl_dev); #endif - unregister_framebuffer(fbinfo); - bfin_t350mcqb_request_ports(0); + platform_set_drvdata(pdev, NULL); + framebuffer_release(fbinfo); + printk(KERN_INFO DRIVER_NAME ": Unregister LCD driver.\n"); return 0; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 59a8f73dec73..6c8ecde6aad1 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -388,6 +388,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + unregister_virtio_device(&vp_dev->vdev); free_irq(pci_dev->irq, vp_dev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); |
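Not part of the commit: the drivers/base/driver.c hunk above plugs a leak in driver_add_kobj() — kvasprintf() allocates the formatted name, kobject_add() makes its own copy of it, and the original buffer was never freed. Below is a minimal userspace sketch of the same pattern, with vasprintf() standing in for kvasprintf() and register_named() an invented stand-in for a callee that does not take ownership of the string.

#define _GNU_SOURCE
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical consumer; like kobject_add(), it only borrows 'name',
 * so the caller remains responsible for freeing it. */
static int register_named(const char *name)
{
	printf("registered \"%s\"\n", name);
	return 0;
}

static int add_named_object(const char *fmt, ...)
{
	va_list args;
	char *name;
	int ret;

	va_start(args, fmt);
	ret = vasprintf(&name, fmt, args);
	va_end(args);
	if (ret < 0)
		return -ENOMEM;

	ret = register_named(name);
	free(name);	/* the fix: freed on every path, success or failure */
	return ret;
}

int main(void)
{
	return add_named_object("cooling_device%d", 0) < 0;
}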
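Also not from the patch itself: the dm-crypt hunks above replace the fragile "pending starts at 2" trick with plain reference counting — the submitter holds one base reference, each block in flight adds one, and whoever drops the count to zero (the submitter for synchronous completions, the async callback otherwise) finishes the I/O. A small userspace sketch of that scheme, with all names invented and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int pending;
	int error;
};

static void finish_request(struct request *req)
{
	printf("request finished, error=%d\n", req->error);
}

/* Drop one reference; the last one to do so completes the request,
 * mirroring atomic_dec_and_test() followed by the completion call. */
static void put_request(struct request *req)
{
	if (atomic_fetch_sub(&req->pending, 1) == 1)
		finish_request(req);
}

static void submit_blocks(struct request *req, int nblocks)
{
	atomic_store(&req->pending, 1);		/* submitter's base reference */

	for (int i = 0; i < nblocks; i++) {
		atomic_fetch_add(&req->pending, 1);	/* one ref per block */
		/* Here every block "completes" synchronously; an asynchronous
		 * backend would call put_request() from its callback instead. */
		put_request(req);
	}

	put_request(req);	/* drop the base reference */
}

int main(void)
{
	struct request req = { .error = 0 };

	submit_blocks(&req, 4);
	return 0;
}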