author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-06 08:01:37 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-06 08:01:37 -0800
commit     dd8856bda5f1308beb113281b248683992998a9e (patch)
tree       5dc35290cdbca32cbdecd93a76fa5b29075ac18c /arch
parent     f81cff0d4067e41fd7383d9c013cc82da7c169d2 (diff)
parent     06328b4f7919e9d2169d45cadc5a37b828a78eda (diff)
Merge git://git.infradead.org/users/dhowells/workq-2.6
* git://git.infradead.org/users/dhowells/workq-2.6:
Actually update the fixed up compile failures.
WorkQueue: Fix up arch-specific work items where possible
WorkStruct: make allyesconfig
WorkStruct: Pass the work_struct pointer instead of context data
WorkStruct: Merge the pending bit into the wq_data pointer
WorkStruct: Typedef the work function prototype
WorkStruct: Separate delayable and non-delayable events.
Diffstat (limited to 'arch')
26 files changed, 107 insertions, 89 deletions
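The series above boils down to one interface change: a work function now receives a pointer to its own struct work_struct instead of an opaque void *context, and per-item state is reached by embedding the work_struct in the owning object and calling container_of(). Below is a minimal sketch of the new shape against the post-merge (2.6.20-era) <linux/workqueue.h>; the frob_* names are invented for illustration and do not appear in the diff that follows.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct frob_device {
	int unit;
	struct work_struct work;	/* embedded in the owning object */
};

/* Old style: static void frob_worker(void *data), with the context
 * pointer supplied as the third argument of INIT_WORK()/DECLARE_WORK(). */
static void frob_worker(struct work_struct *work)
{
	/* New style: recover the owning object from the embedded member. */
	struct frob_device *dev = container_of(work, struct frob_device, work);

	printk(KERN_INFO "frob unit %d serviced\n", dev->unit);
}

static struct frob_device *frob;

static int __init frob_init(void)
{
	frob = kzalloc(sizeof(*frob), GFP_KERNEL);
	if (!frob)
		return -ENOMEM;

	INIT_WORK(&frob->work, frob_worker);	/* two arguments now */
	schedule_work(&frob->work);
	return 0;
}

static void __exit frob_exit(void)
{
	flush_scheduled_work();
	kfree(frob);
}

module_init(frob_init);
module_exit(frob_exit);
MODULE_LICENSE("GPL");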
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf96790..b3599743093b 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 /*
 * Variables
 */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
 int voltage, percent, apm_status, i = 0;
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
 /* Corgi cannot confirm when battery fully charged so periodically kick! */
 if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
 while(1) {
 voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
 sharpsl_pm_led(SHARPSL_LED_OFF);
 sharpsl_pm.charge_mode = CHRG_OFF;
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
 }
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
 sharpsl_pm.charge_mode = CHRG_ERROR;
 }
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
 dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
 else if (sharpsl_pm.charge_mode == CHRG_ON)
 sharpsl_charge_off();
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
 }
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
 sharpsl_charge_off();
 } else if (sharpsl_pm.full_count < 2) {
 dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
 } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
 dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
 } else {
 sharpsl_charge_off();
 sharpsl_pm.charge_mode = CHRG_DONE;
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee1..9d2346fb68f4 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
 cancel_delayed_work(&irda_config->gpio_expa);
 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
 return 0;
 }
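The sharpsl_pm.c hunks above show the delayable half of the split: items that are ever scheduled with a delay become DECLARE_DELAYED_WORK() objects, and an immediate kick is expressed as schedule_delayed_work(..., 0) instead of schedule_work(). A small sketch of that pattern, again assuming the 2.6.20-era API; the charger_* names are illustrative only.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void charger_poll(struct work_struct *work);

/* Delayable items get their own declaration macro and carry a timer. */
static DECLARE_DELAYED_WORK(charger_work, charger_poll);

static void charger_poll(struct work_struct *work)
{
	/* ... sample the charger state here ... */

	/* Re-arm one second from now. */
	schedule_delayed_work(&charger_work, HZ);
}

static int __init charger_init(void)
{
	/* A delay of 0 replaces the old schedule_work() on a delayed item. */
	schedule_delayed_work(&charger_work, 0);
	return 0;
}

static void __exit charger_exit(void)
{
	cancel_delayed_work(&charger_work);
	flush_scheduled_work();
}

module_init(charger_init);
module_exit(charger_exit);
MODULE_LICENSE("GPL");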
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209ff..cbe909bad79b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
 .rows = 8,
 .cols = 8,
 .keymap = nokia770_keymap,
- .keymapsize = ARRAY_SIZE(nokia770_keymap)
+ .keymapsize = ARRAY_SIZE(nokia770_keymap),
 .delay = 4,
 };
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
 printk("HP connected\n");
 }
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
 down(&audio_pwr_sem);
 if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
 up(&audio_pwr_sem);
 }
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 static void nokia770_audio_pwr_down(void)
 {
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6f..0cbf1b0071f8 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state;
 static u8 tps_leds_change;
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
 for (;;) {
 u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
 }
 }
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 #ifdef CONFIG_OMAP_OSK_MISTRAL
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad7..3b1ad1d981a3 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
 cancel_delayed_work(&irda_config->gpio_expa);
 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
 return 0;
 }
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab56..12d2fe0ceff6 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
 if (akita_ioexp_device)
 max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
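The #error markers inserted in board-h3.c above and board-h4.c here flag conversions that could not be done mechanically: set_trans_mode() used to be handed a pointer to the caller's mode argument via PREPARE_WORK(), which both violates the new prototype and pointed at a stack variable. One conventional way out is to store the parameter in the structure that embeds the work item; the sketch below illustrates that idea with invented names and is not the fix that was eventually applied to the OMAP code.

#include <linux/workqueue.h>

struct irda_cfg {
	int mode;			/* parameter formerly passed via PREPARE_WORK() */
	struct delayed_work gpio_expa;
};

static void set_trans_mode(struct work_struct *work)
{
	struct irda_cfg *cfg =
		container_of(work, struct irda_cfg, gpio_expa.work);

	/* ... program the transceiver/GPIO expander for cfg->mode ... */
}

static void irda_cfg_init(struct irda_cfg *cfg)
{
	INIT_DELAYED_WORK(&cfg->gpio_expa, set_trans_mode);
}

static int transceiver_mode(struct irda_cfg *cfg, int mode)
{
	cancel_delayed_work(&cfg->gpio_expa);

	/* Stash the argument in the structure that embeds the work item
	 * instead of handing the handler a pointer to a stack variable. */
	cfg->mode = mode;

	schedule_delayed_work(&cfg->gpio_expa, 0);
	return 0;
}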
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
 }
 }
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 {
 on_each_cpu(mce_checkregs, NULL, 1, 1);
 schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65b..02a9b66b6ac3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
 struct warm_boot_cpu_info {
 struct completion *complete;
+ struct work_struct task;
 int apicid;
 int cpu;
 };
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
- struct warm_boot_cpu_info *info = p;
+ struct warm_boot_cpu_info *info =
+ container_of(work, struct warm_boot_cpu_info, task);
 do_boot_cpu(info->apicid, info->cpu);
 complete(info->complete);
 }
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
 DECLARE_COMPLETION_ONSTACK(done);
 struct warm_boot_cpu_info info;
- struct work_struct task;
 int apicid, ret;
 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 info.complete = &done;
 info.apicid = apicid;
 info.cpu = cpu;
- INIT_WORK(&task, do_warm_boot_cpu, &info);
+ INIT_WORK(&info.task, do_warm_boot_cpu);
 tsc_sync_disabled = 1;
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, KERNEL_PGD_PTRS);
 flush_tlb_all();
- schedule_work(&task);
+ schedule_work(&info.task);
 wait_for_completion(&done);
 tsc_sync_disabled = 0;
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd74..9810c8c90750 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
 unsigned int cpu;
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
 {
 int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
 if (!ret)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..b62f0c4d2c7c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
 info->flags = sstate->flags;
 info->xmit_fifo_size = sstate->xmit_fifo_size;
 info->line = line;
- INIT_WORK(&info->work, do_softint, info);
+ INIT_WORK(&info->work, do_softint);
 info->state = sstate;
 if (sstate->info) {
 kfree(info);
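The i386 smpboot.c conversion above is the template for callers that used to pass an on-stack context block: the work_struct moves into the block, the handler recovers the block with container_of(), and the caller waits on a completion so the stack frame outlives the handler. A condensed sketch of the same shape, assuming the 2.6.20-era API (boot_request and friends are made-up names):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct boot_request {
	struct work_struct task;	/* embedded work item */
	struct completion done;
	int cpu;
};

static void boot_worker(struct work_struct *work)
{
	struct boot_request *req =
		container_of(work, struct boot_request, task);

	printk(KERN_INFO "servicing cpu %d from keventd\n", req->cpu);
	complete(&req->done);		/* lets the sleeping caller continue */
}

static void boot_one_cpu(int cpu)
{
	struct boot_request req = { .cpu = cpu };

	init_completion(&req.done);
	INIT_WORK(&req.task, boot_worker);

	schedule_work(&req.task);
	/* The on-stack request must outlive the handler, hence the wait. */
	wait_for_completion(&req.done);
}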
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..6bedd97570ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 * disable the cmc interrupt vector.
 */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
 * enable the cmc interrupt vector.
 */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 monarch_cpu = -1;
 }
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 /*
 * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 struct create_idle {
+ struct work_struct work;
 struct task_struct *idle;
 struct completion done;
 int cpu;
 };
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
 c_idle->idle = fork_idle(c_idle->cpu);
 complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
 int timeout;
 struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 .cpu = cpu,
 .done = COMPLETION_INITIALIZER(c_idle.done),
 };
- DECLARE_WORK(work, do_fork_idle, &c_idle);
 c_idle.idle = get_idle_for_cpu(cpu);
 if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
 * We can't use kernel_thread since we must avoid to reschedule the child.
 */
 if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
 else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
 wait_for_completion(&c_idle.done);
 }
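The ia64 do_boot_cpu() change additionally shows the initializer form: because the context block now embeds the work item, it can be set up with __WORK_INITIALIZER() inside a designated initializer, and when keventd is not usable the handler is simply called with the embedded work_struct as its argument. A brief hedged illustration with invented names:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct create_req {
	struct work_struct work;
	struct completion done;
	int id;
};

static void create_worker(struct work_struct *work)
{
	struct create_req *req = container_of(work, struct create_req, work);

	/* ... perform the creation for req->id ... */
	complete(&req->done);
}

static void create_sync(int id, int have_keventd)
{
	struct create_req req = {
		/* Initializer form for an embedded work item. */
		.work = __WORK_INITIALIZER(req.work, create_worker),
		.done = COMPLETION_INITIALIZER_ONSTACK(req.done),
		.id   = id,
	};

	if (!have_keventd) {
		/* No workqueue thread available: invoke the handler directly,
		 * passing the embedded work_struct as its argument. */
		req.work.func(&req.work);
	} else {
		schedule_work(&req.work);
		wait_for_completion(&req.done);
	}
}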
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c7881..2c82412b9efe 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 if (!channel_open) {
 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
 return;
 }
- INIT_WORK(&work, sp_work, NULL);
+ INIT_WORK(&work, sp_work);
 queue_work(workqueue, &work);
 } else
 queue_work(workqueue, &work);
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae84823..0e837762cc5b 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@ static unsigned long avr_clock;
 static struct work_struct wd_work;
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
 {
 const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
 int i = 0, rescue = 8;
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)
 ls_uart_init();
- INIT_WORK(&wd_work, wd_stop, NULL);
+ INIT_WORK(&wd_work, wd_stop);
 schedule_work(&wd_work);
 return 0;
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544a..c3a89414ddc0 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
 #define OLD_BACKLIGHT_MAX 15
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 /* Although these variables are used in interrupt context, it makes no sense to
 * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
 return level;
 }
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 if (atomic_read(&kernel_backlight_disabled))
 return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
 return error;
 }
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 if (atomic_read(&kernel_backlight_disabled))
 return;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 137077451316..49037edf7d39 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup.
 */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
 * eeh_thread_launcher
 * @dummy - unused
 */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
 printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e27819..709952c25f29 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@ struct fcc_enet_private {
 phy_info_t *phy;
 struct work_struct phy_relink;
 struct work_struct phy_display_config;
+ struct net_device *dev;
 uint sequence_done;
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
 NULL
 };
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private, phy_relink);
+ struct net_device *dev = fep->dev;
 uint s = fep->phy_status;
 if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
 printk(".\n");
 }
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private,
+ phy_display_config);
+ struct net_device *dev = fep->dev;
 uint s = fep->phy_status;
 printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
 cep->phy_id_done = 0;
 cep->phy_addr = fip->fc_phyaddr;
 mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
- INIT_WORK(&cep->phy_relink, mii_display_status, dev);
- INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+ INIT_WORK(&cep->phy_relink, mii_display_status);
+ INIT_WORK(&cep->phy_display_config, mii_display_config);
+ cep->dev = dev;
 #endif /* CONFIG_USE_MDIO */
 fip++;
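fcc_enet.c above shows the remaining idiom: when the old context pointer was not the structure that embeds the work (here the net_device, while the work items live in the private data), the private structure grows a back-pointer which the handler reaches through container_of(). A sketch of that arrangement, assuming the 2.6.20-era API and invented enet_* names:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct enet_private {
	struct work_struct phy_relink;	/* embedded work item */
	struct net_device *dev;		/* back-pointer for the handler */
	unsigned int phy_status;
};

static void enet_relink_worker(struct work_struct *work)
{
	struct enet_private *priv =
		container_of(work, struct enet_private, phy_relink);
	struct net_device *dev = priv->dev;	/* what 'void *data' used to carry */

	printk(KERN_INFO "%s: phy status %x\n", dev->name, priv->phy_status);
}

static void enet_setup_work(struct net_device *dev)
{
	struct enet_private *priv = netdev_priv(dev);

	priv->dev = dev;			/* replaces INIT_WORK(..., dev) */
	INIT_WORK(&priv->phy_relink, enet_relink_worker);
}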
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e3d331..e6c28fb423b2 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@ struct fec_enet_private {
 uint phy_speed;
 phy_info_t *phy;
 struct work_struct phy_task;
+ struct net_device *dev;
 uint sequence_done;
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
 printk(".\n");
 }
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
 volatile uint *s = &(fep->phy_status);
 printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
 fep->sequence_done = 1;
 }
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
 int duplex;
 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 {
 struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_relink);
 schedule_work(&fep->phy_task);
 }
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
 {
 struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_display_config);
 schedule_work(&fep->phy_task);
 }
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc7d985..67d5cf9cba83 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@ static int appldata_timer_active;
 * Work queue
 */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 /*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
 *
 * call data gathering function for each (active) module
 */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
 struct list_head *lh;
 struct appldata_ops *ops;
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3cc505e..7d4190e55654 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
 return -1;
 }
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 struct tty_struct *tty, int irq)
 {
 struct list_head *ele, *next;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b172160fe04..96f0189327af 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
 static LIST_HEAD(mc_requests);
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
 struct mconsole_entry *req;
 unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
 }
 }
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
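The chan_interrupt() prototype change reflects that delayable items are now a distinct type: struct delayed_work pairs a work_struct with its timer, so any interface that re-arms or cancels a timer-backed item has to say so in its signature. The comment below paraphrases the era's definition rather than quoting it, and requeue_delayed() is an invented example:

#include <linux/workqueue.h>

/*
 * Conceptually (see <linux/workqueue.h> of this era):
 *
 *	struct delayed_work {
 *		struct work_struct work;
 *		struct timer_list timer;
 *	};
 *
 * so helpers that re-arm or cancel a timer-backed item must take
 * struct delayed_work *, not struct work_struct *.
 */
static void requeue_delayed(struct delayed_work *dwork, unsigned long delay)
{
	cancel_delayed_work(dwork);
	schedule_delayed_work(dwork, delay);	/* delay is in jiffies */
}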
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8bd9432..286bc0b3207f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
 * same device, since it tests for (dev->flags & IFF_UP). So
 * there's no harm in delaying the device shutdown.
 */
 schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
 goto out;
 }
 reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f3733f73e..6dfe632f1c14 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
 struct port_list *port;
 struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
 local_irq_restore(flags);
 }
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 static irqreturn_t port_interrupt(int irq, void *data)
 {
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d88..c7587fc39015 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 */
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
 do_machine_check(NULL, 0);
 }
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 schedule_delayed_work(&mcheck_work, check_interval * HZ);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af58..9800147c4c68 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 struct create_idle {
+ struct work_struct work;
 struct task_struct *idle;
 struct completion done;
 int cpu;
 };
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
 c_idle->idle = fork_idle(c_idle->cpu);
 complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 int timeout;
 unsigned long start_rip;
 struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 .cpu = cpu,
 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 };
- DECLARE_WORK(work, do_fork_idle, &c_idle);
 /* allocate memory for gdts of secondary cpus. Hotplug is considered */
 if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 * thread.
 */
 if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
 else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
 wait_for_completion(&c_idle.done);
 }
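The #error in net_kern.c marks a different hazard: close_work was a function-local work item that was scheduled and then allowed to go out of scope, which is clearly unsafe now that the work_struct itself is the queued object. The usual cure is to give the item storage that lives as long as the device; the sketch below shows one way to do that with invented uml_* helpers and is not necessarily the fix the UML driver later adopted.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct uml_net_private {
	struct net_device *dev;
	struct work_struct close_work;	/* lives as long as the device */
};

static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, close_work);

	rtnl_lock();
	dev_close(lp->dev);
	rtnl_unlock();
}

static void uml_net_setup_close(struct uml_net_private *lp,
				struct net_device *dev)
{
	lp->dev = dev;
	INIT_WORK(&lp->close_work, uml_dev_close);
}

static void uml_request_close(struct uml_net_private *lp)
{
	/* Safe to queue: the work_struct is owned by the device's private
	 * data, not by the interrupt handler's stack frame. */
	schedule_work(&lp->close_work);
}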
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cfb..9f05bc9b2dad 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 unsigned int cpu;
 for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 static int __init cpufreq_tsc(void)
 {
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER))
 cpufreq_init = 1;