author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-21 17:54:03 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-21 17:54:03 -0800
commit    | 81ec44a6c69342fec1b1140c60a604027e429f69 (patch)
tree      | ee6bec8a94ef28e111bf766cf4b7a9366cb4f7c1 /drivers/s390
parent    | 48a732dfaa77a4dfec803aa8f248373998704f76 (diff)
parent    | e80cfc31d872b6b85b8966bce6ba80bee401a7dd (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 update from Martin Schwidefsky:
"The most prominent change in this patch set is the software dirty bit
patch for s390. It removes __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY and
the page_test_and_clear_dirty primitive which makes the common memory
management code a bit less obscure.
Heiko fixed most of the PCI related fallout, more often than not
missing GENERIC_HARDIRQS dependencies. Notable is one of the 3270
patches which adds an export to tty_io to be able to resize a tty.
The rest is the usual bunch of cleanups and bug fixes."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
s390/module: Add missing R_390_NONE relocation type
drivers/gpio: add missing GENERIC_HARDIRQ dependency
drivers/input: add couple of missing GENERIC_HARDIRQS dependencies
s390/cleanup: rename SPP to LPP
s390/mm: implement software dirty bits
s390/mm: Fix crst upgrade of mmap with MAP_FIXED
s390/linker skript: discard exit.data at runtime
drivers/media: add missing GENERIC_HARDIRQS dependency
s390/bpf,jit: add vlan tag support
drivers/net,AT91RM9200: add missing GENERIC_HARDIRQS dependency
iucv: fix kernel panic at reboot
s390/Kconfig: sort list of arch selected config options
phylib: remove !S390 dependeny from Kconfig
uio: remove !S390 dependency from Kconfig
dasd: fix sysfs cleanup in dasd_generic_remove
s390/pci: fix hotplug module init
s390/pci: cleanup clp page allocation
s390/pci: cleanup clp inline assembly
s390/perf: cpum_cf: fallback to software sampling events
s390/mm: provide PAGE_SHARED define
...
Diffstat (limited to 'drivers/s390')
29 files changed, 741 insertions(+), 540 deletions(-)
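Most of the dasd, sclp and zcore hunks below are a mechanical rename of get_clock() to get_tod_clock(). For orientation, here is a minimal sketch of what such an s390 TOD-clock helper conventionally looks like; this is an assumption for illustration only, and the actual in-tree definition in arch/s390/include/asm/timex.h may differ (for example by using STCKF or additional fencing):

/*
 * Sketch only, not the authoritative definition: the s390 TOD clock is
 * conventionally read with the STCK instruction, which stores a 64-bit
 * clock value into a memory operand.
 */
static inline unsigned long long get_tod_clock(void)
{
	unsigned long long clk;

	asm volatile("stck %0" : "=Q" (clk) : : "cc");
	return clk;
}

Because the rename only changes the helper's name, every call site in the diff below is a one-line substitution with no behavioral change.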
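Separately, the scm_blk.h and chsc.h hunks below replace empty macro stubs for the disabled-option case with static inline functions. A hedged illustration of that pattern, using hypothetical names, shows why it is preferred: callers keep full type checking and unused-argument warnings are avoided, while the compiler still optimizes the stub away.

/* Hypothetical names, illustrating the macro-to-static-inline pattern. */
struct example_dev;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_update(struct example_dev *dev);
#else
/*
 * A static inline stub keeps the call site type-checked and warning-free
 * even when the feature is compiled out; an empty macro would silently
 * discard its arguments without any checking.
 */
static inline int example_update(struct example_dev *dev)
{
	return 0;
}
#endif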
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 29225e1c159c..f1b7fdc58a5f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) switch (rc) { case 0: /* termination successful */ cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->starttime = 0; DBF_DEV_EVENT(DBF_DEBUG, device, "terminate cqr %p successful", @@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->status = DASD_CQR_ERROR; return -EIO; } - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { @@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } - now = get_clock(); + now = get_tod_clock(); cqr = (struct dasd_ccw_req *) intparm; /* check for conditions that should be handled immediately */ if (!cqr || @@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device) } break; case DASD_CQR_QUEUED: - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_CLEARED; break; default: /* no need to modify the others */ @@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) wait_event(generic_waitq, _wait_for_wakeup(cqr)); } - maincqr->endclk = get_clock(); + maincqr->endclk = get_tod_clock(); if ((maincqr->status != DASD_CQR_DONE) && (maincqr->intrc != -ERESTARTSYS)) dasd_log_sense(maincqr, &maincqr->irb); @@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) "Cancelling request %p failed with rc=%d\n", cqr, rc); } else { - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } break; default: /* already finished or clear pending - do nothing */ @@ -2568,7 +2568,7 @@ restart: } /* Rechain finished requests to final queue */ - cqr->endclk = get_clock(); + cqr->endclk = get_tod_clock(); list_move_tail(&cqr->blocklist, final_queue); } } @@ -2711,7 +2711,7 @@ restart_cb: } /* call the callback function */ spin_lock_irq(&block->request_queue_lock); - cqr->endclk = get_clock(); + cqr->endclk = get_tod_clock(); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); spin_unlock_irq(&block->request_queue_lock); @@ -3042,12 +3042,15 @@ void dasd_generic_remove(struct ccw_device *cdev) cdev->handler = NULL; device = dasd_device_from_cdev(cdev); - if (IS_ERR(device)) + if (IS_ERR(device)) { + dasd_remove_sysfs_files(cdev); return; + } if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { /* Already doing offline processing */ dasd_put_device(device); + dasd_remove_sysfs_files(cdev); return; } /* @@ -3504,7 +3507,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, cqr->memdev = device; cqr->expires = 10*HZ; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index f8212d54013a..d26134713682 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) dctl_cqr->expires = 5 * 60 * HZ; dctl_cqr->retries = 2; - dctl_cqr->buildclk = get_clock(); + dctl_cqr->buildclk = get_tod_clock(); dctl_cqr->status = DASD_CQR_FILLED; @@ -1719,7 +1719,7 @@ 
dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) erp->magic = default_erp->magic; erp->expires = default_erp->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; /* remove the default erp */ @@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_ERR, device, "%s", "Unable to allocate ERP request"); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock (); + cqr->stopclk = get_tod_clock(); } else { DBF_DEV_EVENT(DBF_ERR, device, "Unable to allocate ERP request " @@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) erp->magic = cqr->magic; erp->expires = cqr->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; return erp; diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 6b556995bb33..a2597e683e79 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device, ccw->count = sizeof(*(lcu->uac)); ccw->cda = (__u32)(addr_t) lcu->uac; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* need to unset flag here to detect race with summary unit check */ @@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, cqr->memdev = device; cqr->block = NULL; cqr->expires = 5 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 704488d0f819..cc0603358522 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr) private->iob.bio_list = dreq->bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; rc = dia250(&private->iob, RW_BIO); switch (rc) { case 0: /* Synchronous I/O finished successfully */ - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_SUCCESS; /* Indicate to calling function that only a dasd_schedule_bh() and no timer is needed */ @@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr) mdsk_term_io(device); mdsk_init_io(device, device->block->bp_block, 0, NULL); cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); dasd_schedule_device_bh(device); return 0; } @@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code, return; } - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); expires = 0; if ((ext_code.subcode & 0xff) == 0) { @@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, } } cqr->retries = DIAG_MAX_RETRIES; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index e37bc1620d14..33f26bfa62f2 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device, cqr->expires = 10*HZ; 
cqr->lpm = lpm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); } @@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) ccw->count = sizeof(struct dasd_rssd_features); ccw->cda = (__u32)(addr_t) features; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, cqr->block = NULL; cqr->retries = 256; cqr->expires = 10*HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cqr->startdev = device; cqr->memdev = device; cqr->retries = 255; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device, fcp->startdev = device; fcp->memdev = device; fcp->retries = 256; - fcp->buildclk = get_clock(); + fcp->buildclk = get_tod_clock(); fcp->status = DASD_CQR_FILLED; return fcp; } @@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; out_error: @@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, cqr->expires = startdev->default_expires * HZ; cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) @@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = 
dasd_sleep_on_immediatly(cqr); @@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device, set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); cqr->retries = 5; cqr->expires = 10 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; cqr->lpm = usrparm.path_mask; @@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) ccw->count = sizeof(struct dasd_rssd_perf_stats_t); ccw->cda = (__u32)(addr_t) stats; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) cqr->memdev = device; cqr->retries = 3; cqr->expires = 10 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* Build the ccws */ diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index ff901b5509c1..21ef63cf0960 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device) ccw->flags = 0; ccw->cda = (__u32)(addr_t) cqr->data; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d01ef82f8757..3250cb471f78 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) pr_err("%s: default ERP has run out of retries and failed\n", dev_name(&device->cdev->dev)); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; } /* end dasd_default_erp_action */ @@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) cqr->status = DASD_CQR_DONE; else { cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 414698584344..4dd0e2f6047e 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, cqr->block = block; cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ cqr->retries = 32; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 7ac6bad919ef..3c1ccf494647 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -68,19 +68,34 @@ void scm_initiate_cluster_request(struct scm_request *); void scm_cluster_request_irq(struct scm_request *); bool scm_test_cluster_request(struct scm_request *); bool scm_cluster_size_valid(void); -#else -#define __scm_free_rq_cluster(scmrq) {} -#define __scm_alloc_rq_cluster(scmrq) 0 -#define scm_request_cluster_init(scmrq) {} -#define scm_reserve_cluster(scmrq) true -#define scm_release_cluster(scmrq) {} -#define scm_blk_dev_cluster_setup(bdev) {} -#define scm_need_cluster_request(scmrq) false -#define scm_initiate_cluster_request(scmrq) {} -#define scm_cluster_request_irq(scmrq) {} -#define scm_test_cluster_request(scmrq) false -#define scm_cluster_size_valid() true -#endif +#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ +static inline void 
__scm_free_rq_cluster(struct scm_request *scmrq) {} +static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq) +{ + return 0; +} +static inline void scm_request_cluster_init(struct scm_request *scmrq) {} +static inline bool scm_reserve_cluster(struct scm_request *scmrq) +{ + return true; +} +static inline void scm_release_cluster(struct scm_request *scmrq) {} +static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} +static inline bool scm_need_cluster_request(struct scm_request *scmrq) +{ + return false; +} +static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {} +static inline void scm_cluster_request_irq(struct scm_request *scmrq) {} +static inline bool scm_test_cluster_request(struct scm_request *scmrq) +{ + return false; +} +static inline bool scm_cluster_size_valid(void) +{ + return true; +} +#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ extern debug_info_t *scm_debug; diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 911704571b9c..230697aac94b 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -443,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp) tty_kref_put(tty); return -ENODEV; } - minor = tty->index + RAW3270_FIRSTMINOR; + minor = tty->index; tty_kref_put(tty); } mutex_lock(&fs3270_mutex); @@ -524,6 +524,25 @@ static const struct file_operations fs3270_fops = { .llseek = no_llseek, }; +void fs3270_create_cb(int minor) +{ + __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), + NULL, "3270/tub%d", minor); +} + +void fs3270_destroy_cb(int minor) +{ + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); + __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); +} + +struct raw3270_notifier fs3270_notifier = +{ + .create = fs3270_create_cb, + .destroy = fs3270_destroy_cb, +}; + /* * 3270 fullscreen driver initialization. */ @@ -532,16 +551,20 @@ fs3270_init(void) { int rc; - rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); + rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops); if (rc) return rc; + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), + NULL, "3270/tub"); + raw3270_register_notifier(&fs3270_notifier); return 0; } static void __exit fs3270_exit(void) { - unregister_chrdev(IBM_FS3270_MAJOR, "fs3270"); + raw3270_unregister_notifier(&fs3270_notifier); + __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } MODULE_LICENSE("GPL"); diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 9a6c140c5f07..4c9030a5b9f2 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -28,7 +28,7 @@ #include <linux/device.h> #include <linux/mutex.h> -static struct class *class3270; +struct class *class3270; /* The main 3270 data structure. */ struct raw3270 { @@ -37,6 +37,7 @@ struct raw3270 { int minor; short model, rows, cols; + unsigned int state; unsigned long flags; struct list_head req_queue; /* Request queue. */ @@ -46,20 +47,26 @@ struct raw3270 { struct timer_list timer; /* Device timer. 
*/ unsigned char *ascebc; /* ascii -> ebcdic table */ - struct device *clttydev; /* 3270-class tty device ptr */ - struct device *cltubdev; /* 3270-class tub device ptr */ - struct raw3270_request init_request; + struct raw3270_view init_view; + struct raw3270_request init_reset; + struct raw3270_request init_readpart; + struct raw3270_request init_readmod; unsigned char init_data[256]; }; +/* raw3270->state */ +#define RAW3270_STATE_INIT 0 /* Initial state */ +#define RAW3270_STATE_RESET 1 /* Reset command is pending */ +#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */ +#define RAW3270_STATE_READMOD 3 /* Read partition is pending */ +#define RAW3270_STATE_READY 4 /* Device is usable by views */ + /* raw3270->flags */ #define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */ #define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */ -#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ -#define RAW3270_FLAGS_READY 4 /* Device is useable by views */ -#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ -#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */ +#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */ +#define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */ /* Semaphore to protect global data of raw3270 (devices, views, etc). */ static DEFINE_MUTEX(raw3270_mutex); @@ -97,6 +104,17 @@ static unsigned char raw3270_ebcgraf[64] = { 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f }; +static inline int raw3270_state_ready(struct raw3270 *rp) +{ + return rp->state == RAW3270_STATE_READY; +} + +static inline int raw3270_state_final(struct raw3270 *rp) +{ + return rp->state == RAW3270_STATE_INIT || + rp->state == RAW3270_STATE_READY; +} + void raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr) { @@ -214,7 +232,7 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib) * Stop running ccw. */ static int -raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) +__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) { int retries; int rc; @@ -233,18 +251,6 @@ raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) return rc; } -static int -raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) -{ - unsigned long flags; - int rc; - - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - rc = raw3270_halt_io_nolock(rp, rq); - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); - return rc; -} - /* * Add the request to the request queue, try to start it if the * 3270 device is idle. Return without waiting for end of i/o. 
@@ -281,8 +287,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = __raw3270_start(rp, view, rq); spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); @@ -299,8 +305,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = __raw3270_start(rp, view, rq); return rc; @@ -378,7 +384,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) case RAW3270_IO_STOP: if (!rq) break; - raw3270_halt_io_nolock(rp, rq); + __raw3270_halt_io(rp, rq); rq->rc = -EIO; break; default: @@ -413,9 +419,14 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* - * Size sensing. + * To determine the size of the 3270 device we need to do: + * 1) send a 'read partition' data stream to the device + * 2) wait for the attn interrupt that precedes the query reply + * 3) do a read modified to get the query reply + * To make things worse we have to cope with intervention + * required (3270 device switched to 'stand-by') and command + * rejects (old devices that can't do 'read partition'). */ - struct raw3270_ua { /* Query Reply structure for Usable Area */ struct { /* Usable Area Query Reply Base */ short l; /* Length of this structured field */ @@ -451,117 +462,21 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */ } __attribute__ ((packed)) aua; } __attribute__ ((packed)); -static struct diag210 raw3270_init_diag210; -static DEFINE_MUTEX(raw3270_init_mutex); - -static int -raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, - struct irb *irb) -{ - /* - * Unit-Check Processing: - * Expect Command Reject or Intervention Required. - */ - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - /* Request finished abnormally. */ - if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { - set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); - return RAW3270_IO_BUSY; - } - } - if (rq) { - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - if (irb->ecw[0] & SNS0_CMD_REJECT) - rq->rc = -EOPNOTSUPP; - else - rq->rc = -EIO; - } else - /* Request finished normally. Copy residual count. */ - rq->rescnt = irb->scsw.cmd.count; - } - if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { - set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); - wake_up(&raw3270_wait_queue); - } - return RAW3270_IO_DONE; -} - -static struct raw3270_fn raw3270_init_fn = { - .intv = raw3270_init_irq -}; - -static struct raw3270_view raw3270_init_view = { - .fn = &raw3270_init_fn -}; - -/* - * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup - * Wait for end of request. The request must have been started - * with raw3270_start, rc = 0. The device lock may NOT have been - * released between calling raw3270_start and raw3270_wait. - */ static void -raw3270_wake_init(struct raw3270_request *rq, void *data) -{ - wake_up((wait_queue_head_t *) data); -} - -/* - * Special wait function that can cope with console initialization. 
- */ -static int -raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, - struct raw3270_request *rq) -{ - unsigned long flags; - int rc; - -#ifdef CONFIG_TN3270_CONSOLE - if (raw3270_registered == 0) { - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); - rq->callback = NULL; - rc = __raw3270_start(rp, view, rq); - if (rc == 0) - while (!raw3270_request_final(rq)) { - wait_cons_dev(); - barrier(); - } - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); - return rq->rc; - } -#endif - rq->callback = raw3270_wake_init; - rq->callback_data = &raw3270_wait_queue; - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); - rc = __raw3270_start(rp, view, rq); - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); - if (rc) - return rc; - /* Now wait for the completion. */ - rc = wait_event_interruptible(raw3270_wait_queue, - raw3270_request_final(rq)); - if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ - raw3270_halt_io(view->dev, rq); - /* No wait for the halt to complete. */ - wait_event(raw3270_wait_queue, raw3270_request_final(rq)); - return -ERESTARTSYS; - } - return rq->rc; -} - -static int -__raw3270_size_device_vm(struct raw3270 *rp) +raw3270_size_device_vm(struct raw3270 *rp) { int rc, model; struct ccw_dev_id dev_id; + struct diag210 diag_data; ccw_device_get_id(rp->cdev, &dev_id); - raw3270_init_diag210.vrdcdvno = dev_id.devno; - raw3270_init_diag210.vrdclen = sizeof(struct diag210); - rc = diag210(&raw3270_init_diag210); - if (rc) - return rc; - model = raw3270_init_diag210.vrdccrmd; + diag_data.vrdcdvno = dev_id.devno; + diag_data.vrdclen = sizeof(struct diag210); + rc = diag210(&diag_data); + model = diag_data.vrdccrmd; + /* Use default model 2 if the size could not be detected */ + if (rc || model < 2 || model > 5) + model = 2; switch (model) { case 2: rp->model = model; @@ -583,77 +498,25 @@ __raw3270_size_device_vm(struct raw3270 *rp) rp->rows = 27; rp->cols = 132; break; - default: - rc = -EOPNOTSUPP; - break; } - return rc; } -static int -__raw3270_size_device(struct raw3270 *rp) +static void +raw3270_size_device(struct raw3270 *rp) { - static const unsigned char wbuf[] = - { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; struct raw3270_ua *uap; - int rc; - /* - * To determine the size of the 3270 device we need to do: - * 1) send a 'read partition' data stream to the device - * 2) wait for the attn interrupt that precedes the query reply - * 3) do a read modified to get the query reply - * To make things worse we have to cope with intervention - * required (3270 device switched to 'stand-by') and command - * rejects (old devices that can't do 'read partition'). - */ - memset(&rp->init_request, 0, sizeof(rp->init_request)); - memset(&rp->init_data, 0, 256); - /* Store 'read partition' data stream to init_data */ - memcpy(&rp->init_data, wbuf, sizeof(wbuf)); - INIT_LIST_HEAD(&rp->init_request.list); - rp->init_request.ccw.cmd_code = TC_WRITESF; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = sizeof(wbuf); - rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data); - - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - if (rc) - /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */ - return rc; - - /* Wait for attention interrupt. 
*/ -#ifdef CONFIG_TN3270_CONSOLE - if (raw3270_registered == 0) { - unsigned long flags; - - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)) - wait_cons_dev(); - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); - } else -#endif - rc = wait_event_interruptible(raw3270_wait_queue, - test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)); - if (rc) - return rc; - - /* - * The device accepted the 'read partition' command. Now - * set up a read ccw and issue it. - */ - rp->init_request.ccw.cmd_code = TC_READMOD; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = sizeof(rp->init_data); - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - if (rc) - return rc; /* Got a Query Reply */ uap = (struct raw3270_ua *) (rp->init_data + 1); /* Paranoia check. */ - if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) - return -EOPNOTSUPP; + if (rp->init_readmod.rc || rp->init_data[0] != 0x88 || + uap->uab.qcode != 0x81) { + /* Couldn't detect size. Use default model 2. */ + rp->model = 2; + rp->rows = 24; + rp->cols = 80; + return; + } /* Copy rows/columns of default Usable Area */ rp->rows = uap->uab.h; rp->cols = uap->uab.w; @@ -666,66 +529,131 @@ __raw3270_size_device(struct raw3270 *rp) rp->rows = uap->aua.hauai; rp->cols = uap->aua.wauai; } - return 0; + /* Try to find a model. */ + rp->model = 0; + if (rp->rows == 24 && rp->cols == 80) + rp->model = 2; + if (rp->rows == 32 && rp->cols == 80) + rp->model = 3; + if (rp->rows == 43 && rp->cols == 80) + rp->model = 4; + if (rp->rows == 27 && rp->cols == 132) + rp->model = 5; } -static int -raw3270_size_device(struct raw3270 *rp) +static void +raw3270_size_device_done(struct raw3270 *rp) { - int rc; + struct raw3270_view *view; - mutex_lock(&raw3270_init_mutex); - rp->view = &raw3270_init_view; - raw3270_init_view.dev = rp; - if (MACHINE_IS_VM) - rc = __raw3270_size_device_vm(rp); - else - rc = __raw3270_size_device(rp); - raw3270_init_view.dev = NULL; rp->view = NULL; - mutex_unlock(&raw3270_init_mutex); - if (rc == 0) { /* Found something. */ - /* Try to find a model. */ - rp->model = 0; - if (rp->rows == 24 && rp->cols == 80) - rp->model = 2; - if (rp->rows == 32 && rp->cols == 80) - rp->model = 3; - if (rp->rows == 43 && rp->cols == 80) - rp->model = 4; - if (rp->rows == 27 && rp->cols == 132) - rp->model = 5; - } else { - /* Couldn't detect size. Use default model 2. */ - rp->model = 2; - rp->rows = 24; - rp->cols = 80; - return 0; + rp->state = RAW3270_STATE_READY; + /* Notify views about new size */ + list_for_each_entry(view, &rp->view_list, list) + if (view->fn->resize) + view->fn->resize(view, rp->model, rp->rows, rp->cols); + /* Setup processing done, now activate a view */ + list_for_each_entry(view, &rp->view_list, list) { + rp->view = view; + if (view->fn->activate(view) == 0) + break; + rp->view = NULL; } - return rc; +} + +static void +raw3270_read_modified_cb(struct raw3270_request *rq, void *data) +{ + struct raw3270 *rp = rq->view->dev; + + raw3270_size_device(rp); + raw3270_size_device_done(rp); +} + +static void +raw3270_read_modified(struct raw3270 *rp) +{ + if (rp->state != RAW3270_STATE_W4ATTN) + return; + /* Use 'read modified' to get the result of a read partition. 
*/ + memset(&rp->init_readmod, 0, sizeof(rp->init_readmod)); + memset(&rp->init_data, 0, sizeof(rp->init_data)); + rp->init_readmod.ccw.cmd_code = TC_READMOD; + rp->init_readmod.ccw.flags = CCW_FLAG_SLI; + rp->init_readmod.ccw.count = sizeof(rp->init_data); + rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data); + rp->init_readmod.callback = raw3270_read_modified_cb; + rp->state = RAW3270_STATE_READMOD; + raw3270_start_irq(&rp->init_view, &rp->init_readmod); +} + +static void +raw3270_writesf_readpart(struct raw3270 *rp) +{ + static const unsigned char wbuf[] = + { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; + + /* Store 'read partition' data stream to init_data */ + memset(&rp->init_readpart, 0, sizeof(rp->init_readpart)); + memset(&rp->init_data, 0, sizeof(rp->init_data)); + memcpy(&rp->init_data, wbuf, sizeof(wbuf)); + rp->init_readpart.ccw.cmd_code = TC_WRITESF; + rp->init_readpart.ccw.flags = CCW_FLAG_SLI; + rp->init_readpart.ccw.count = sizeof(wbuf); + rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data); + rp->state = RAW3270_STATE_W4ATTN; + raw3270_start_irq(&rp->init_view, &rp->init_readpart); +} + +/* + * Device reset + */ +static void +raw3270_reset_device_cb(struct raw3270_request *rq, void *data) +{ + struct raw3270 *rp = rq->view->dev; + + if (rp->state != RAW3270_STATE_RESET) + return; + if (rq && rq->rc) { + /* Reset command failed. */ + rp->state = RAW3270_STATE_INIT; + } else if (0 && MACHINE_IS_VM) { + raw3270_size_device_vm(rp); + raw3270_size_device_done(rp); + } else + raw3270_writesf_readpart(rp); } static int -raw3270_reset_device(struct raw3270 *rp) +__raw3270_reset_device(struct raw3270 *rp) { int rc; - mutex_lock(&raw3270_init_mutex); - memset(&rp->init_request, 0, sizeof(rp->init_request)); + /* Store reset data stream to init_data/init_reset */ + memset(&rp->init_reset, 0, sizeof(rp->init_reset)); memset(&rp->init_data, 0, sizeof(rp->init_data)); - /* Store reset data stream to init_data/init_request */ rp->init_data[0] = TW_KR; - INIT_LIST_HEAD(&rp->init_request.list); - rp->init_request.ccw.cmd_code = TC_EWRITEA; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = 1; - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); - rp->view = &raw3270_init_view; - raw3270_init_view.dev = rp; - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - raw3270_init_view.dev = NULL; - rp->view = NULL; - mutex_unlock(&raw3270_init_mutex); + rp->init_reset.ccw.cmd_code = TC_EWRITEA; + rp->init_reset.ccw.flags = CCW_FLAG_SLI; + rp->init_reset.ccw.count = 1; + rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data); + rp->init_reset.callback = raw3270_reset_device_cb; + rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset); + if (rc == 0 && rp->state == RAW3270_STATE_INIT) + rp->state = RAW3270_STATE_RESET; + return rc; +} + +static int +raw3270_reset_device(struct raw3270 *rp) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + rc = __raw3270_reset_device(rp); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return rc; } @@ -739,13 +667,50 @@ raw3270_reset(struct raw3270_view *view) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = raw3270_reset_device(view->dev); return rc; } +static int +raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, + struct irb *irb) +{ + struct raw3270 *rp; + + 
/* + * Unit-Check Processing: + * Expect Command Reject or Intervention Required. + */ + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + /* Request finished abnormally. */ + if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { + set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); + return RAW3270_IO_BUSY; + } + } + if (rq) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->ecw[0] & SNS0_CMD_REJECT) + rq->rc = -EOPNOTSUPP; + else + rq->rc = -EIO; + } + } + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { + /* Queue read modified after attention interrupt */ + rp = view->dev; + raw3270_read_modified(rp); + } + return RAW3270_IO_DONE; +} + +static struct raw3270_fn raw3270_init_fn = { + .intv = raw3270_init_irq +}; + /* * Setup new 3270 device. */ @@ -774,6 +739,10 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) INIT_LIST_HEAD(&rp->req_queue); INIT_LIST_HEAD(&rp->view_list); + rp->init_view.dev = rp; + rp->init_view.fn = &raw3270_init_fn; + rp->view = &rp->init_view; + /* * Add device to list and find the smallest unused minor * number for it. Note: there is no device with minor 0, @@ -812,6 +781,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) */ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) { + unsigned long flags; struct raw3270 *rp; char *ascebc; int rc; @@ -822,16 +792,15 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) if (rc) return ERR_PTR(rc); set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); - rc = raw3270_reset_device(rp); - if (rc) - return ERR_PTR(rc); - rc = raw3270_size_device(rp); - if (rc) - return ERR_PTR(rc); - rc = raw3270_reset_device(rp); - if (rc) - return ERR_PTR(rc); - set_bit(RAW3270_FLAGS_READY, &rp->flags); + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + do { + __raw3270_reset_device(rp); + while (!raw3270_state_final(rp)) { + wait_cons_dev(); + barrier(); + } + } while (rp->state != RAW3270_STATE_READY); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return rp; } @@ -893,13 +862,13 @@ raw3270_activate_view(struct raw3270_view *view) spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); if (rp->view == view) rc = 0; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; else { oldview = NULL; - if (rp->view) { + if (rp->view && rp->view->fn->deactivate) { oldview = rp->view; oldview->fn->deactivate(oldview); } @@ -944,7 +913,7 @@ raw3270_deactivate_view(struct raw3270_view *view) list_del_init(&view->list); list_add_tail(&view->list, &rp->view_list); /* Try to activate another view. 
*/ - if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && + if (raw3270_state_ready(rp) && !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { list_for_each_entry(view, &rp->view_list, list) { rp->view = view; @@ -975,18 +944,16 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) if (rp->minor != minor) continue; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { - atomic_set(&view->ref_count, 2); - view->dev = rp; - view->fn = fn; - view->model = rp->model; - view->rows = rp->rows; - view->cols = rp->cols; - view->ascebc = rp->ascebc; - spin_lock_init(&view->lock); - list_add(&view->list, &rp->view_list); - rc = 0; - } + atomic_set(&view->ref_count, 2); + view->dev = rp; + view->fn = fn; + view->model = rp->model; + view->rows = rp->rows; + view->cols = rp->cols; + view->ascebc = rp->ascebc; + spin_lock_init(&view->lock); + list_add(&view->list, &rp->view_list); + rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); break; } @@ -1010,14 +977,11 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) if (rp->minor != minor) continue; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { - view = ERR_PTR(-ENOENT); - list_for_each_entry(tmp, &rp->view_list, list) { - if (tmp->fn == fn) { - raw3270_get_view(tmp); - view = tmp; - break; - } + list_for_each_entry(tmp, &rp->view_list, list) { + if (tmp->fn == fn) { + raw3270_get_view(tmp); + view = tmp; + break; } } spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); @@ -1044,7 +1008,7 @@ raw3270_del_view(struct raw3270_view *view) rp->view = NULL; } list_del_init(&view->list); - if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && + if (!rp->view && raw3270_state_ready(rp) && !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { /* Try to activate another view. */ list_for_each_entry(nv, &rp->view_list, list) { @@ -1072,10 +1036,6 @@ raw3270_delete_device(struct raw3270 *rp) /* Remove from device chain. 
*/ mutex_lock(&raw3270_mutex); - if (rp->clttydev && !IS_ERR(rp->clttydev)) - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); - if (rp->cltubdev && !IS_ERR(rp->cltubdev)) - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor)); list_del_init(&rp->list); mutex_unlock(&raw3270_mutex); @@ -1139,75 +1099,34 @@ static struct attribute_group raw3270_attr_group = { static int raw3270_create_attributes(struct raw3270 *rp) { - int rc; - - rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); - if (rc) - goto out; - - rp->clttydev = device_create(class3270, &rp->cdev->dev, - MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL, - "tty%s", dev_name(&rp->cdev->dev)); - if (IS_ERR(rp->clttydev)) { - rc = PTR_ERR(rp->clttydev); - goto out_ttydev; - } - - rp->cltubdev = device_create(class3270, &rp->cdev->dev, - MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL, - "tub%s", dev_name(&rp->cdev->dev)); - if (!IS_ERR(rp->cltubdev)) - goto out; - - rc = PTR_ERR(rp->cltubdev); - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); - -out_ttydev: - sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); -out: - return rc; + return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); } /* * Notifier for device addition/removal */ -struct raw3270_notifier { - struct list_head list; - void (*notifier)(int, int); -}; - static LIST_HEAD(raw3270_notifier); -int raw3270_register_notifier(void (*notifier)(int, int)) +int raw3270_register_notifier(struct raw3270_notifier *notifier) { - struct raw3270_notifier *np; struct raw3270 *rp; - np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL); - if (!np) - return -ENOMEM; - np->notifier = notifier; mutex_lock(&raw3270_mutex); - list_add_tail(&np->list, &raw3270_notifier); - list_for_each_entry(rp, &raw3270_devices, list) { - get_device(&rp->cdev->dev); - notifier(rp->minor, 1); - } + list_add_tail(¬ifier->list, &raw3270_notifier); + list_for_each_entry(rp, &raw3270_devices, list) + notifier->create(rp->minor); mutex_unlock(&raw3270_mutex); return 0; } -void raw3270_unregister_notifier(void (*notifier)(int, int)) +void raw3270_unregister_notifier(struct raw3270_notifier *notifier) { - struct raw3270_notifier *np; + struct raw3270 *rp; mutex_lock(&raw3270_mutex); - list_for_each_entry(np, &raw3270_notifier, list) - if (np->notifier == notifier) { - list_del(&np->list); - kfree(np); - break; - } + list_for_each_entry(rp, &raw3270_devices, list) + notifier->destroy(rp->minor); + list_del(¬ifier->list); mutex_unlock(&raw3270_mutex); } @@ -1217,29 +1136,20 @@ void raw3270_unregister_notifier(void (*notifier)(int, int)) static int raw3270_set_online (struct ccw_device *cdev) { - struct raw3270 *rp; struct raw3270_notifier *np; + struct raw3270 *rp; int rc; rp = raw3270_create_device(cdev); if (IS_ERR(rp)) return PTR_ERR(rp); - rc = raw3270_reset_device(rp); - if (rc) - goto failure; - rc = raw3270_size_device(rp); - if (rc) - goto failure; - rc = raw3270_reset_device(rp); - if (rc) - goto failure; rc = raw3270_create_attributes(rp); if (rc) goto failure; - set_bit(RAW3270_FLAGS_READY, &rp->flags); + raw3270_reset_device(rp); mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) - np->notifier(rp->minor, 1); + np->create(rp->minor); mutex_unlock(&raw3270_mutex); return 0; @@ -1268,14 +1178,14 @@ raw3270_remove (struct ccw_device *cdev) */ if (rp == NULL) return; - clear_bit(RAW3270_FLAGS_READY, &rp->flags); sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group); /* Deactivate current view and remove all views. 
*/ spin_lock_irqsave(get_ccwdev_lock(cdev), flags); if (rp->view) { - rp->view->fn->deactivate(rp->view); + if (rp->view->fn->deactivate) + rp->view->fn->deactivate(rp->view); rp->view = NULL; } while (!list_empty(&rp->view_list)) { @@ -1290,7 +1200,7 @@ raw3270_remove (struct ccw_device *cdev) mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) - np->notifier(rp->minor, 0); + np->destroy(rp->minor); mutex_unlock(&raw3270_mutex); /* Reset 3270 device. */ @@ -1324,7 +1234,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev) if (!rp) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (rp->view) + if (rp->view && rp->view->fn->deactivate) rp->view->fn->deactivate(rp->view); if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { /* @@ -1351,7 +1261,7 @@ static int raw3270_pm_start(struct ccw_device *cdev) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); - if (rp->view) + if (rp->view && rp->view->fn->activate) rp->view->fn->activate(rp->view); spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return 0; @@ -1434,6 +1344,7 @@ MODULE_LICENSE("GPL"); module_init(raw3270_init); module_exit(raw3270_exit); +EXPORT_SYMBOL(class3270); EXPORT_SYMBOL(raw3270_request_alloc); EXPORT_SYMBOL(raw3270_request_free); EXPORT_SYMBOL(raw3270_request_reset); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index ed34eb2199cc..7b73ff8c1bd7 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -91,6 +91,7 @@ struct raw3270_iocb { struct raw3270; struct raw3270_view; +extern struct class *class3270; /* 3270 CCW request */ struct raw3270_request { @@ -140,6 +141,7 @@ struct raw3270_fn { struct raw3270_request *, struct irb *); void (*release)(struct raw3270_view *); void (*free)(struct raw3270_view *); + void (*resize)(struct raw3270_view *, int, int, int); }; /* @@ -192,8 +194,14 @@ struct raw3270 *raw3270_setup_console(struct ccw_device *cdev); void raw3270_wait_cons_dev(struct raw3270 *); /* Notifier for device addition/removal */ -int raw3270_register_notifier(void (*notifier)(int, int)); -void raw3270_unregister_notifier(void (*notifier)(int, int)); +struct raw3270_notifier { + struct list_head list; + void (*create)(int minor); + void (*destroy)(int minor); +}; + +int raw3270_register_notifier(struct raw3270_notifier *); +void raw3270_unregister_notifier(struct raw3270_notifier *); void raw3270_pm_unfreeze(struct raw3270_view *); /* diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 12c16a65dd25..bd6871bf545a 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -450,7 +450,7 @@ sclp_sync_wait(void) timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_clock() + + timeout = get_tod_clock() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -472,7 +472,7 @@ sclp_sync_wait(void) while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ if (timer_pending(&sclp_request_timer) && - get_clock() > timeout && + get_tod_clock() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); cpu_relax(); diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index c44d13f607bc..30a2255389e5 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -56,7 +56,6 @@ static int __initdata early_read_info_sccb_valid; u64 sclp_facilities; static u8 sclp_fac84; -static 
u8 sclp_fac85; static unsigned long long rzm; static unsigned long long rnmax; @@ -131,7 +130,8 @@ void __init sclp_facilities_detect(void) sccb = &early_read_info_sccb; sclp_facilities = sccb->facilities; sclp_fac84 = sccb->fac84; - sclp_fac85 = sccb->fac85; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; rzm <<= 20; @@ -171,12 +171,6 @@ unsigned long long sclp_get_rzm(void) return rzm; } -u8 sclp_get_fac85(void) -{ - return sclp_fac85; -} -EXPORT_SYMBOL_GPL(sclp_get_fac85); - /* * This function will be called after sclp_facilities_detect(), which gets * called from early.c code. Therefore the sccb should have valid contents. diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 3860e796b65f..b907dba24025 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/console.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #include <linux/slab.h> #include <linux/bootmem.h> @@ -80,6 +81,8 @@ struct tty3270 { unsigned int highlight; /* Blink/reverse/underscore */ unsigned int f_color; /* Foreground color */ struct tty3270_line *screen; + unsigned int n_model, n_cols, n_rows; /* New model & size */ + struct work_struct resize_work; /* Input stuff. */ struct string *prompt; /* Output string for input area. */ @@ -115,6 +118,7 @@ struct tty3270 { #define TTY_UPDATE_ALL 16 /* Recreate screen. */ static void tty3270_update(struct tty3270 *); +static void tty3270_resize_work(struct work_struct *work); /* * Setup timeout for a device. On timeout trigger an update. @@ -683,12 +687,6 @@ tty3270_alloc_view(void) INIT_LIST_HEAD(&tp->update); INIT_LIST_HEAD(&tp->rcl_lines); tp->rcl_max = 20; - tty_port_init(&tp->port); - setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, - (unsigned long) tp); - tasklet_init(&tp->readlet, - (void (*)(unsigned long)) tty3270_read_tasklet, - (unsigned long) tp->read); for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { tp->freemem_pages[pages] = (void *) @@ -710,6 +708,15 @@ tty3270_alloc_view(void) tp->kbd = kbd_alloc(); if (!tp->kbd) goto out_reset; + + tty_port_init(&tp->port); + setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, + (unsigned long) tp); + tasklet_init(&tp->readlet, + (void (*)(unsigned long)) tty3270_read_tasklet, + (unsigned long) tp->read); + INIT_WORK(&tp->resize_work, tty3270_resize_work); + return tp; out_reset: @@ -752,42 +759,96 @@ tty3270_free_view(struct tty3270 *tp) /* * Allocate tty3270 screen. 
*/ -static int -tty3270_alloc_screen(struct tty3270 *tp) +static struct tty3270_line * +tty3270_alloc_screen(unsigned int rows, unsigned int cols) { + struct tty3270_line *screen; unsigned long size; int lines; - size = sizeof(struct tty3270_line) * (tp->view.rows - 2); - tp->screen = kzalloc(size, GFP_KERNEL); - if (!tp->screen) + size = sizeof(struct tty3270_line) * (rows - 2); + screen = kzalloc(size, GFP_KERNEL); + if (!screen) goto out_err; - for (lines = 0; lines < tp->view.rows - 2; lines++) { - size = sizeof(struct tty3270_cell) * tp->view.cols; - tp->screen[lines].cells = kzalloc(size, GFP_KERNEL); - if (!tp->screen[lines].cells) + for (lines = 0; lines < rows - 2; lines++) { + size = sizeof(struct tty3270_cell) * cols; + screen[lines].cells = kzalloc(size, GFP_KERNEL); + if (!screen[lines].cells) goto out_screen; } - return 0; + return screen; out_screen: while (lines--) - kfree(tp->screen[lines].cells); - kfree(tp->screen); + kfree(screen[lines].cells); + kfree(screen); out_err: - return -ENOMEM; + return ERR_PTR(-ENOMEM); } /* * Free tty3270 screen. */ static void -tty3270_free_screen(struct tty3270 *tp) +tty3270_free_screen(struct tty3270_line *screen, unsigned int rows) { int lines; - for (lines = 0; lines < tp->view.rows - 2; lines++) - kfree(tp->screen[lines].cells); - kfree(tp->screen); + for (lines = 0; lines < rows - 2; lines++) + kfree(screen[lines].cells); + kfree(screen); +} + +/* + * Resize tty3270 screen + */ +static void tty3270_resize_work(struct work_struct *work) +{ + struct tty3270 *tp = container_of(work, struct tty3270, resize_work); + struct tty3270_line *screen, *oscreen; + struct tty_struct *tty; + unsigned int orows; + struct winsize ws; + + screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); + if (!screen) + return; + /* Switch to new output size */ + spin_lock_bh(&tp->view.lock); + oscreen = tp->screen; + orows = tp->view.rows; + tp->view.model = tp->n_model; + tp->view.rows = tp->n_rows; + tp->view.cols = tp->n_cols; + tp->screen = screen; + free_string(&tp->freemem, tp->prompt); + free_string(&tp->freemem, tp->status); + tty3270_create_prompt(tp); + tty3270_create_status(tp); + tp->nr_up = 0; + while (tp->nr_lines < tp->view.rows - 2) + tty3270_blank_line(tp); + tp->update_flags = TTY_UPDATE_ALL; + spin_unlock_bh(&tp->view.lock); + tty3270_free_screen(oscreen, orows); + tty3270_set_timer(tp, 1); + /* Informat tty layer about new size */ + tty = tty_port_tty_get(&tp->port); + if (!tty) + return; + ws.ws_row = tp->view.rows - 2; + ws.ws_col = tp->view.cols; + tty_do_resize(tty, &ws); +} + +static void +tty3270_resize(struct raw3270_view *view, int model, int rows, int cols) +{ + struct tty3270 *tp = container_of(view, struct tty3270, view); + + tp->n_model = model; + tp->n_rows = rows; + tp->n_cols = cols; + schedule_work(&tp->resize_work); } /* @@ -815,7 +876,8 @@ static void tty3270_free(struct raw3270_view *view) { struct tty3270 *tp = container_of(view, struct tty3270, view); - tty3270_free_screen(tp); + + tty3270_free_screen(tp->screen, tp->view.rows); tty3270_free_view(tp); } @@ -827,9 +889,8 @@ tty3270_del_views(void) { int i; - for (i = 0; i < tty3270_max_index; i++) { - struct raw3270_view *view = - raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR); + for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) { + struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i); if (!IS_ERR(view)) raw3270_del_view(view); } @@ -840,7 +901,8 @@ static struct raw3270_fn tty3270_fn = { .deactivate = tty3270_deactivate, .intv = (void *) 
tty3270_irq, .release = tty3270_release, - .free = tty3270_free + .free = tty3270_free, + .resize = tty3270_resize }; /* @@ -853,8 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) int i, rc; /* Check if the tty3270 is already there. */ - view = raw3270_find_view(&tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + view = raw3270_find_view(&tty3270_fn, tty->index); if (!IS_ERR(view)) { tp = container_of(view, struct tty3270, view); tty->driver_data = tp; @@ -866,29 +927,26 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) tp->inattr = TF_INPUT; return tty_port_install(&tp->port, driver, tty); } - if (tty3270_max_index < tty->index + 1) - tty3270_max_index = tty->index + 1; - - /* Quick exit if there is no device for tty->index. */ - if (PTR_ERR(view) == -ENODEV) - return -ENODEV; + if (tty3270_max_index < tty->index) + tty3270_max_index = tty->index; /* Allocate tty3270 structure on first open. */ tp = tty3270_alloc_view(); if (IS_ERR(tp)) return PTR_ERR(tp); - rc = raw3270_add_view(&tp->view, &tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); if (rc) { tty3270_free_view(tp); return rc; } - rc = tty3270_alloc_screen(tp); - if (rc) { + tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); + if (IS_ERR(tp->screen)) { + rc = PTR_ERR(tp->screen); raw3270_put_view(&tp->view); raw3270_del_view(&tp->view); + tty3270_free_view(tp); return rc; } @@ -926,6 +984,20 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) } /* + * This routine is called whenever a 3270 tty is opened. + */ +static int +tty3270_open(struct tty_struct *tty, struct file *filp) +{ + struct tty3270 *tp = tty->driver_data; + struct tty_port *port = &tp->port; + + port->count++; + tty_port_tty_set(port, tty); + return 0; +} + +/* * This routine is called when the 3270 tty is closed. We wait * for the remaining request to be completed. Then we clean up. */ @@ -1753,6 +1825,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty, static const struct tty_operations tty3270_ops = { .install = tty3270_install, .cleanup = tty3270_cleanup, + .open = tty3270_open, .close = tty3270_close, .write = tty3270_write, .put_char = tty3270_put_char, @@ -1771,6 +1844,22 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; +void tty3270_create_cb(int minor) +{ + tty_register_device(tty3270_driver, minor, NULL); +} + +void tty3270_destroy_cb(int minor) +{ + tty_unregister_device(tty3270_driver, minor); +} + +struct raw3270_notifier tty3270_notifier = +{ + .create = tty3270_create_cb, + .destroy = tty3270_destroy_cb, +}; + /* * 3270 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
@@ -1780,23 +1869,25 @@ static int __init tty3270_init(void) struct tty_driver *driver; int ret; - driver = alloc_tty_driver(RAW3270_MAXDEVS); - if (!driver) - return -ENOMEM; + driver = tty_alloc_driver(RAW3270_MAXDEVS, + TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV | + TTY_DRIVER_RESET_TERMIOS); + if (IS_ERR(driver)) + return PTR_ERR(driver); /* * Initialize the tty_driver structure * Entries in tty3270_driver that are NOT initialized: * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc */ - driver->driver_name = "ttyTUB"; - driver->name = "ttyTUB"; + driver->driver_name = "tty3270"; + driver->name = "3270/tty"; driver->major = IBM_TTY3270_MAJOR; - driver->minor_start = RAW3270_FIRSTMINOR; + driver->minor_start = 0; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; - driver->flags = TTY_DRIVER_RESET_TERMIOS; tty_set_operations(driver, &tty3270_ops); ret = tty_register_driver(driver); if (ret) { @@ -1804,6 +1895,7 @@ static int __init tty3270_init(void) return ret; } tty3270_driver = driver; + raw3270_register_notifier(&tty3270_notifier); return 0; } @@ -1812,6 +1904,7 @@ tty3270_exit(void) { struct tty_driver *driver; + raw3270_unregister_notifier(&tty3270_notifier); driver = tty3270_driver; tty3270_driver = NULL; tty_unregister_driver(driver); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index e3b9308b0fe3..1d61a01576d2 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -62,6 +62,7 @@ static struct dentry *zcore_dir; static struct dentry *zcore_file; static struct dentry *zcore_memmap_file; static struct dentry *zcore_reipl_file; +static struct dentry *zcore_hsa_file; static struct ipl_parameter_block *ipl_block; /* @@ -77,6 +78,8 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) int offs, blk_num; static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + if (!hsa_available) + return -ENODATA; if (count == 0) return 0; @@ -278,6 +281,15 @@ next: } /* + * Release the HSA + */ +static void release_hsa(void) +{ + diag308(DIAG308_REL_HSA, NULL); + hsa_available = 0; +} + +/* * Read routine for zcore character device * First 4K are dump header * Next 32MB are HSA Memory @@ -363,8 +375,8 @@ static int zcore_open(struct inode *inode, struct file *filp) static int zcore_release(struct inode *inode, struct file *filep) { - diag308(DIAG308_REL_HSA, NULL); - hsa_available = 0; + if (hsa_available) + release_hsa(); return 0; } @@ -474,6 +486,41 @@ static const struct file_operations zcore_reipl_fops = { .llseek = no_llseek, }; +static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + static char str[18]; + + if (hsa_available) + snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); + else + snprintf(str, sizeof(str), "0\n"); + return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); +} + +static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + char value; + + if (*ppos != 0) + return -EPIPE; + if (copy_from_user(&value, buf, 1)) + return -EFAULT; + if (value != '0') + return -EINVAL; + release_hsa(); + return count; +} + +static const struct file_operations zcore_hsa_fops = { + .owner = THIS_MODULE, + .write = zcore_hsa_write, + .read = zcore_hsa_read, + .open = nonseekable_open, + .llseek = no_llseek, +}; + #ifdef CONFIG_32BIT static void __init set_lc_mask(struct save_area *map) @@ -590,7 +637,7 @@ static int __init 
zcore_header_init(int arch, struct zcore_header *hdr) hdr->rmem_size = memory; hdr->mem_end = sys_info.mem_size; hdr->num_pages = memory / PAGE_SIZE; - hdr->tod = get_clock(); + hdr->tod = get_tod_clock(); get_cpu_id(&hdr->cpu_id); for (i = 0; zfcpdump_save_areas[i]; i++) { prefix = zfcpdump_save_areas[i]->pref_reg; @@ -658,6 +705,7 @@ static int __init zcore_init(void) rc = check_sdias(); if (rc) goto fail; + hsa_available = 1; rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); if (rc) @@ -714,9 +762,16 @@ static int __init zcore_init(void) rc = -ENOMEM; goto fail_memmap_file; } - hsa_available = 1; + zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, + NULL, &zcore_hsa_fops); + if (!zcore_hsa_file) { + rc = -ENOMEM; + goto fail_reipl_file; + } return 0; +fail_reipl_file: + debugfs_remove(zcore_reipl_file); fail_memmap_file: debugfs_remove(zcore_memmap_file); fail_file: @@ -733,6 +788,7 @@ static void __exit zcore_exit(void) debug_unregister(zcore_dbf); sclp_sdias_exit(); free_page((unsigned long) ipl_block); + debugfs_remove(zcore_hsa_file); debugfs_remove(zcore_reipl_file); debugfs_remove(zcore_memmap_file); debugfs_remove(zcore_file); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 10729bbceced..31ceef1beb8b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -435,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { -#ifdef CONFIG_PCI switch (sei_area->cc) { case 1: zpci_event_error(sei_area->ccdf); @@ -444,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) zpci_event_availability(sei_area->ccdf); break; default: - CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", sei_area->cc); break; } -#endif } static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) @@ -471,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) chsc_process_sei_scm_change(sei_area); break; default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); break; } + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) { + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); + } } -static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) +static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { do { memset(sei, 0, sizeof(*sei)); @@ -488,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) if (chsc(sei)) break; - if (sei->response.code == 0x0001) { - CIO_CRW_EVENT(2, "chsc: sei successful\n"); - - /* Check if we might have lost some information. 
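The zcore hunks above add a debugfs file, "hsa", that reports the remaining HSA size in hex and lets user space hand the 32 MB HSA back to the machine early by writing '0' to it, rather than only on the last close of the zcore device. A small user-space sketch, under the usual assumption that debugfs is mounted at /sys/kernel/debug:

/* build: cc -o zcore_hsa zcore_hsa.c */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define HSA_FILE "/sys/kernel/debug/zcore/hsa"	/* assumes default debugfs mount */

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Read the remaining HSA size (hex, "0\n" once it has been released). */
	fd = open(HSA_FILE, O_RDONLY);
	if (fd < 0) {
		perror("open " HSA_FILE);
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	buf[n] = '\0';
	printf("HSA size: %s", buf);

	/*
	 * Release the HSA.  A fresh open is used because the write handler
	 * rejects a non-zero file position with -EPIPE, and only '0' is a
	 * valid value (anything else returns -EINVAL).
	 */
	fd = open(HSA_FILE, O_WRONLY);
	if (fd < 0) {
		perror("open " HSA_FILE);
		return 1;
	}
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

From a shell the equivalent is "cat /sys/kernel/debug/zcore/hsa" and "echo 0 > /sys/kernel/debug/zcore/hsa"; subsequent HSA reads then fail with -ENODATA, as the memcpy_hsa() guard above shows.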
*/ - if (sei->u.nt0_area.flags & 0x40) { - CIO_CRW_EVENT(2, "chsc: event overflow\n"); - css_schedule_eval_all(); - } - - switch (sei->nt) { - case 0: - chsc_process_sei_nt0(&sei->u.nt0_area); - break; - case 2: - chsc_process_sei_nt2(&sei->u.nt2_area); - break; - default: - CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n", - sei->nt); - break; - } - } else { + if (sei->response.code != 0x0001) { CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei->response.code); break; } - } while (sei->u.nt0_area.flags & 0x80); - return 0; + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); + switch (sei->nt) { + case 0: + chsc_process_sei_nt0(&sei->u.nt0_area); + break; + case 2: + chsc_process_sei_nt2(&sei->u.nt2_area); + break; + default: + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); + break; + } + } while (sei->u.nt0_area.flags & 0x80); } +/* + * Handle channel subsystem related CRWs. + * Use store event information to find out what's going on. + * + * Note: Access to sei_page is serialized through machine check handler + * thread, so no need for locking. + */ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - struct chsc_sei *sei; + struct chsc_sei *sei = sei_page; if (overflow) { css_schedule_eval_all(); @@ -531,14 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); - if (!sei_page) - return; - /* Access to sei_page is serialized through machine check handler - * thread, so no need for locking. */ - sei = sei_page; CIO_TRACE_EVENT(2, "prcss"); - __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); } void chsc_chp_online(struct chp_id chpid) diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 662dab4b93e6..227e05f674b3 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); #ifdef CONFIG_SCM_BUS int scm_update_information(void); #else /* CONFIG_SCM_BUS */ -#define scm_update_information() 0 +static inline int scm_update_information(void) { return 0; } #endif /* CONFIG_SCM_BUS */ diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index c8faf6230b0f..986ef6a92a41 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -962,9 +962,9 @@ static void css_reset(void) atomic_inc(&chpid_reset_count); } /* Wait for machine check for all channel paths. 
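The one-line chsc.h change above swaps the CONFIG_SCM_BUS=n stub from a function-like macro to a static inline. The difference is worth spelling out: the inline stub keeps a checked prototype and does not degenerate into a bare "0;" statement when the return value is ignored. A stand-alone illustration (only scm_update_information itself is a real kernel symbol; the two suffixed spellings below are for comparison):

#include <stdio.h>

/* Old stub style: a function-like macro that simply expands to 0. */
#define scm_update_information_macro()		0

/* New stub style from the patch: a static inline with the real signature. */
static inline int scm_update_information_inline(void) { return 0; }

int main(void)
{
	/*
	 * The macro call below expands to the bare statement "0;", which
	 * -Wall/-Wunused-value flags as a statement with no effect.  The
	 * inline version is an ordinary, prototype-checked call and costs
	 * nothing once the optimizer sees the constant return value.
	 */
	scm_update_information_macro();		/* may warn */
	scm_update_information_inline();	/* clean */

	printf("%d %d\n", scm_update_information_macro(),
	       scm_update_information_inline());
	return 0;
}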
*/ - timeout = get_clock() + (RCHP_TIMEOUT << 12); + timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); while (atomic_read(&chpid_reset_count) != 0) { - if (get_clock() > timeout) + if (get_tod_clock() > timeout) break; cpu_relax(); } diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index c9fc61c0a866..4495e0627a40 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -33,7 +33,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> -#include <linux/timex.h> /* get_clock() */ +#include <linux/timex.h> /* get_tod_clock() */ #include <asm/ccwdev.h> #include <asm/cio.h> @@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev) memcpy(cmb_data->last_block, hw_block, cmb_data->size); memcpy(reference_buf, hw_block, cmb_data->size); } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); - cmb_data->last_update = get_clock(); + cmb_data->last_update = get_tod_clock(); kfree(reference_buf); return 0; } @@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev) memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); cmb_data->last_update = 0; } - cdev->private->cmb_start_time = get_clock(); + cdev->private->cmb_start_time = get_tod_clock(); spin_unlock_irq(cdev->ccwlock); } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fd00afd8b850..a239237d43f3 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -780,7 +780,7 @@ static int __init setup_css(int nr) css->cssid = nr; dev_set_name(&css->device, "css%x", nr); css->device.release = channel_subsystem_release; - tod_high = (u32) (get_clock() >> 32); + tod_high = (u32) (get_tod_clock() >> 32); css_generate_pgid(css, tod_high); return 0; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7cd5c6812ac7..c6767f5a58b2 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr, return count; } +static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "%02x\n", sch->vpm); +} + static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); @@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(availability, 0444, available_show, NULL); static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); +static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, &dev_attr_logging.attr, + &dev_attr_vpm.attr, NULL, }; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1bb1d00095af..c7638c543250 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) cc = stsch_err(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " - "device information:\n", get_clock()); + "device information:\n", get_tod_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, orb, sizeof(*orb), 0); diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 908d287f66c1..37ada05e82a5 100644 --- 
a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -23,6 +23,8 @@ #define PGID_RETRIES 256 #define PGID_TIMEOUT (10 * HZ) +static void verify_start(struct ccw_device *cdev); + /* * Process path verification data and report result. */ @@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; - /* Adjust lpm. */ - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm); + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & + ~cdev->private->path_noirq_mask); if (!req->lpm) goto out_nopath; nop_build_cp(cdev); @@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; - if (rc == 0) + switch (rc) { + case 0: sch->vpm |= req->lpm; - else if (rc != -EACCES) + break; + case -ETIME: + cdev->private->path_noirq_mask |= req->lpm; + break; + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; + break; + default: goto err; + } + /* Continue on the next path. */ req->lpm >>= 1; nop_do(cdev); return; @@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn) req->cp = cp; } +static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc) +{ + if (rc) { + /* We don't know the path groups' state. Abort. */ + verify_done(cdev, rc); + return; + } + /* + * Path groups have been reset. Restart path verification but + * leave paths in path_noirq_mask out. + */ + cdev->private->flags.pgid_unknown = 0; + verify_start(cdev); +} + +/* + * Reset pathgroups and restart path verification, leave unusable paths out. + */ +static void pgid_wipeout_start(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_dev_id *id = &cdev->private->dev_id; + struct ccw_request *req = &cdev->private->req; + u8 fn; + + CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n", + id->ssid, id->devno, cdev->private->pgid_valid_mask, + cdev->private->path_noirq_mask); + + /* Initialize request data. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = sch->schib.pmcw.pam; + req->callback = pgid_wipeout_callback; + fn = SPID_FUNC_DISBAND; + if (cdev->private->flags.mpath) + fn |= SPID_FUNC_MULTI_PATH; + spid_build_cp(cdev, fn); + ccw_request_start(cdev); +} + /* * Perform establish/resign SET PGID on a single path. */ @@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev) return; out_nopath: + if (cdev->private->flags.pgid_unknown) { + /* At least one SPID could be partially done. */ + pgid_wipeout_start(cdev); + return; + } verify_done(cdev, sch->vpm ? 0 : -EACCES); } -static void verify_start(struct ccw_device *cdev); - /* * Process SET PGID request result for a single path. 
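The device_pgid.c hunks above extend the old "worked or not" check into a per-result switch: paths whose requests time out (-ETIME) are parked in path_noirq_mask and excluded from further verification, paths that turn out not operational (-EACCES) are recorded in path_notoper_mask, and only successful paths end up in vpm. The stand-alone sketch below models just that bookkeeping over an 8-bit path mask; probe_path() and the sample masks are invented, the three mask names mirror the patch.

#include <errno.h>
#include <stdio.h>

static unsigned char vpm;		/* paths verified ok */
static unsigned char path_noirq_mask;	/* no interrupt received (timeout) */
static unsigned char path_notoper_mask;	/* path found not operational */

/* Stand-in for the real per-path channel I/O. */
static int probe_path(unsigned char lpm)
{
	if (lpm == 0x40)
		return -ETIME;		/* pretend this path stays silent */
	if (lpm == 0x08)
		return -EACCES;		/* pretend this path is not operational */
	return 0;
}

int main(void)
{
	unsigned char pam = 0xc9;	/* example path-available mask */

	for (unsigned char lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & pam & ~path_noirq_mask))
			continue;	/* unavailable or already known silent */
		switch (probe_path(lpm)) {
		case 0:
			vpm |= lpm;
			break;
		case -ETIME:
			path_noirq_mask |= lpm;
			break;
		case -EACCES:
			path_notoper_mask |= lpm;
			break;
		}
	}
	printf("vpm=%02x noirq=%02x notoper=%02x\n",
	       vpm, path_noirq_mask, path_notoper_mask);
	return 0;
}

With silent paths tracked separately, the patch can also trigger pgid_wipeout_start() when a SET PGID may have been applied partially, instead of declaring the whole device inaccessible.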
*/ @@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc) case 0: sch->vpm |= req->lpm & sch->opm; break; + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; + break; case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; break; case -EOPNOTSUPP: if (cdev->private->flags.mpath) { @@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc) else { donepm = pgid_to_donepm(cdev); sch->vpm = donepm & sch->opm; - cdev->private->pgid_todo_mask &= ~donepm; cdev->private->pgid_reset_mask |= reset; + cdev->private->pgid_todo_mask &= + ~(donepm | cdev->private->path_noirq_mask); pgid_fill(cdev, pgid); } out: @@ -341,6 +404,10 @@ out: cdev->private->pgid_todo_mask, mismatch, reserved, reset); switch (rc) { case 0: + if (cdev->private->flags.pgid_unknown) { + pgid_wipeout_start(cdev); + return; + } /* Anything left to do? */ if (cdev->private->pgid_todo_mask == 0) { verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); @@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; + int ret; - /* Adjust lpm if paths are not set in pam. */ - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam); + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & + ~cdev->private->path_noirq_mask); if (!req->lpm) goto out_nopath; snid_build_cp(cdev); @@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev) return; out_nopath: - snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES); + if (cdev->private->pgid_valid_mask) + ret = 0; + else if (cdev->private->path_noirq_mask) + ret = -ETIME; + else + ret = -EACCES; + snid_done(cdev, ret); } /* @@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc) { struct ccw_request *req = &cdev->private->req; - if (rc == 0) + switch (rc) { + case 0: cdev->private->pgid_valid_mask |= req->lpm; - else if (rc != -EACCES) + break; + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; + break; + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; + break; + default: goto err; + } + /* Continue on the next path. */ req->lpm >>= 1; snid_do(cdev); return; @@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev) sch->vpm = 0; sch->lpm = sch->schib.pmcw.pam; + + /* Initialize PGID data. */ + memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); + cdev->private->pgid_valid_mask = 0; + cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; + cdev->private->path_notoper_mask = 0; + /* Initialize request data. */ memset(req, 0, sizeof(*req)); req->timeout = PGID_TIMEOUT; @@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev) */ void ccw_device_verify_start(struct ccw_device *cdev) { - struct subchannel *sch = to_subchannel(cdev->dev.parent); - CIO_TRACE_EVENT(4, "vrfy"); CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); - /* Initialize PGID data. */ - memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); - cdev->private->pgid_valid_mask = 0; - cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; /* * Initialize pathgroup and multipath state with target values. * They may change in the course of path verification. 
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev) cdev->private->flags.pgroup = cdev->private->options.pgroup; cdev->private->flags.mpath = cdev->private->options.mpath; cdev->private->flags.doverify = 0; + cdev->private->path_noirq_mask = 0; verify_start(cdev); } diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 76253dfcc1be..b108f4a5c7dd 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -126,6 +126,10 @@ struct ccw_device_private { u8 pgid_valid_mask; /* mask of valid PGIDs */ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ u8 pgid_reset_mask; /* mask of PGIDs which were reset */ + u8 path_noirq_mask; /* mask of paths for which no irq was + received */ + u8 path_notoper_mask; /* mask of paths which were found + not operable */ u8 path_gone_mask; /* mask of paths, that became unavailable */ u8 path_new_mask; /* mask of paths, that became available */ struct { @@ -145,6 +149,7 @@ struct ccw_device_private { unsigned int resuming:1; /* recognition while resume */ unsigned int pgroup:1; /* pathgroup is set up */ unsigned int mpath:1; /* multipathing is set up */ + unsigned int pgid_unknown:1;/* unknown pgid state */ unsigned int initialized:1; /* set if initial reference held */ } __attribute__((packed)) flags; unsigned long intparm; /* user interruption parameter */ diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 1671d3461f29..abc550e5dd35 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -338,10 +338,10 @@ again: retries++; if (!start_time) { - start_time = get_clock(); + start_time = get_tod_clock(); goto again; } - if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) + if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) goto again; } if (retries) { @@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) int count, stop; unsigned char state = 0; - q->timestamp = get_clock(); + q->timestamp = get_tod_clock(); /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved @@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q) if (bufnr != q->last_move) { q->last_move = bufnr; if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) - q->u.in.timestamp = get_clock(); + q->u.in.timestamp = get_tod_clock(); return 1; } else return 0; @@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) * At this point we know, that inbound first_to_check * has (probably) not moved (see qdio_inbound_processing). 
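qeth_get_micros() above, like the css_reset() timeout and the qdio timestamps earlier in the series, leans on one property of the z/Architecture TOD clock: bit 51 of the 64-bit value ticks once per microsecond, so shifting by 12 converts between TOD units and microseconds. A small stand-alone illustration (tod_to_us()/us_to_tod() are ad-hoc helpers, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

/* Bit 51 of the TOD clock is one microsecond, so the low 12 bits are
 * sub-microsecond resolution. */
static inline uint64_t tod_to_us(uint64_t tod) { return tod >> 12; }
static inline uint64_t us_to_tod(uint64_t us)  { return us << 12; }

int main(void)
{
	uint64_t one_second_tod = us_to_tod(1000000ULL);

	printf("1s in TOD units: 0x%llx\n",
	       (unsigned long long)one_second_tod);
	printf("back to us:      %llu\n",
	       (unsigned long long)tod_to_us(one_second_tod));
	return 0;
}

The same shift runs in the other direction in css_reset() above, where a timeout given in microseconds is turned into a TOD deadline with "<< 12".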
*/ - if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { + if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", q->first_to_check); return 1; @@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) int count, stop; unsigned char state = 0; - q->timestamp = get_clock(); + q->timestamp = get_tod_clock(); if (need_siga_sync(q)) if (((queue_type(q) != QDIO_IQDIO_QFMT) && diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d690b33846cc..d87961d4c0de 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -818,7 +818,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) static inline int qeth_get_micros(void) { - return (int) (get_clock() >> 12); + return (int) (get_tod_clock() >> 12); } static inline int qeth_get_ip_version(struct sk_buff *skb) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c96320d79fbc..c7e148f33b2a 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) zfcp_reqlist_add(adapter->req_list, req); req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); - req->issued = get_clock(); + req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { del_timer(&req->timer); /* lookup request again, list might have changed */ diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 50b5615848f6..665e3cfaaf85 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) unsigned long long now, span; int used; - now = get_clock_monotonic(); + now = get_tod_clock_monotonic(); span = (now - qdio->req_q_time) >> 12; used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); qdio->req_q_util += used * span; |
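The zfcp_qdio_account() change closes out the series by moving the utilization bookkeeping to get_tod_clock_monotonic(); the arithmetic itself is a time-weighted sum: each sample adds the number of buffers currently in use multiplied by the microseconds since the previous sample, and dividing the running sum by the total elapsed time yields the average queue occupancy. A stand-alone sketch with invented sample data (QDIO_MAX_BUFFERS_PER_Q genuinely is 128):

#include <stdint.h>
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
	/* (timestamp in microseconds, free buffers observed at that instant) */
	const uint64_t t[]     = {   0, 1000, 3000, 6000 };
	const int      free_[] = { 128,   96,   64,  128 };
	const unsigned n = sizeof(t) / sizeof(t[0]);
	uint64_t util_sum = 0, prev_t = t[0];

	for (unsigned i = 1; i < n; i++) {
		uint64_t span = t[i] - prev_t;		/* us since last sample */
		int used = QDIO_MAX_BUFFERS_PER_Q - free_[i];

		util_sum += (uint64_t)used * span;	/* req_q_util analogue */
		prev_t = t[i];
	}
	printf("average buffers in use: %llu\n",
	       (unsigned long long)(util_sum / (t[n - 1] - t[0])));
	return 0;
}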