Diffstat (limited to 'drivers/s390/char/sclp.c')
 drivers/s390/char/sclp.c | 93 ++++++++++++++++++++++++++---------------
 1 file changed, 62 insertions(+), 31 deletions(-)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
sclp_running_state_idle,
- sclp_running_state_running
+ sclp_running_state_running,
+ sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;
/* Internal state: is a read request pending? */
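The running-state machine gains a third state: sclp_running_state_reset_pending is entered by the interrupt handler as soon as a finished SCCB is reported, and left again once request post-processing completes (see the interrupt-handler hunk below). A small user-space model of the transitions, for orientation only, not driver code:

#include <stdio.h>

/* Illustrative model of the three-state machine this hunk introduces. */
enum sclp_running_state {
	sclp_running_state_idle,	/* no request active at the SCLP */
	sclp_running_state_running,	/* a request has been started */
	sclp_running_state_reset_pending /* finished SCCB seen, cleanup pending */
};

static const char *state_name(enum sclp_running_state s)
{
	switch (s) {
	case sclp_running_state_idle:		return "idle";
	case sclp_running_state_running:	return "running";
	case sclp_running_state_reset_pending:	return "reset_pending";
	}
	return "?";
}

int main(void)
{
	enum sclp_running_state s = sclp_running_state_idle;

	s = sclp_running_state_running;		/* __sclp_start_request() */
	printf("after start:     %s\n", state_name(s));
	s = sclp_running_state_reset_pending;	/* sclp_interrupt_handler() */
	printf("after interrupt: %s\n", state_name(s));
	s = sclp_running_state_idle;		/* post-processing done */
	printf("after cleanup:   %s\n", state_name(s));
	return 0;
}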
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL 10
-#define SCLP_RETRY_INTERVAL 15
+#define SCLP_RETRY_INTERVAL 30
static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
-static int
-service_call(sclp_cmdw_t command, void *sccb)
+int
+sclp_service_call(sclp_cmdw_t command, void *sccb)
{
int cc;
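Two changes in this hunk: the retry interval doubles from 15 to 30 seconds, and service_call() loses its static qualifier and gains the sclp_ prefix so that code outside sclp.c can issue raw service calls. A hypothetical external caller could look like the sketch below; the command word and the SCCB setup are assumptions for illustration, not part of this patch:

/* Hypothetical caller of the now-exported sclp_service_call(); assumes
 * the sclp headers provide struct sccb_header and a read-SCP-info
 * command word. Returns 0 on success, negative on error, matching the
 * driver's convention. */
static int example_raw_service_call(void *page)
{
	struct sccb_header *sccb = page;	/* zeroed, page-sized buffer */

	sccb->length = PAGE_SIZE;
	return sclp_service_call(SCLP_CMDW_READ_SCP_INFO, sccb);
}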
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
return 0;
}
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
- * force restart of running request. */
+static inline void __sclp_make_read_req(void);
+
static void
-sclp_request_timeout(unsigned long data)
+__sclp_queue_read_req(void)
{
- unsigned long flags;
-
- if (data) {
- spin_lock_irqsave(&sclp_lock, flags);
- sclp_running_state = sclp_running_state_idle;
- spin_unlock_irqrestore(&sclp_lock, flags);
+ if (sclp_reading_state == sclp_reading_state_idle) {
+ sclp_reading_state = sclp_reading_state_reading;
+ __sclp_make_read_req();
+ /* Add request to head of queue */
+ list_add(&sclp_read_req.list, &sclp_req_queue);
}
- sclp_process_queue();
}
/* Set up request retry timer. Called while sclp_lock is locked. */
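The read-request setup that used to sit inline in the interrupt handler is factored into __sclp_queue_read_req(), which the new timeout handler below also uses. Note list_add() rather than list_add_tail(): the read request is put at the head of the queue so pending event buffers are fetched before any queued writes run. A stand-alone demonstration of that head-versus-tail ordering, with minimal re-implementations of the list primitives (not the kernel's own):

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's circular doubly linked list,
 * just enough to show why list_add() (head insert) lets the read
 * request overtake writes queued with list_add_tail(). */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->next = head;
	new->prev = head->prev;
	head->prev->next = new;
	head->prev = new;
}

struct req { const char *name; struct list_head list; };

int main(void)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct req w1 = { "write-1" }, w2 = { "write-2" }, rd = { "read" };
	struct list_head *pos;

	list_add_tail(&w1.list, &queue);   /* sclp_add_request() uses tail */
	list_add_tail(&w2.list, &queue);
	list_add(&rd.list, &queue);        /* __sclp_queue_read_req(): head */

	for (pos = queue.next; pos != &queue; pos = pos->next) {
		struct req *r = (struct req *)((char *)pos -
					       offsetof(struct req, list));
		printf("%s\n", r->name);   /* prints: read, write-1, write-2 */
	}
	return 0;
}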
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
add_timer(&sclp_request_timer);
}
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ if (data) {
+ if (sclp_running_state == sclp_running_state_running) {
+ /* Break running state and queue NOP read event request
+ * to get a defined interface state. */
+ __sclp_queue_read_req();
+ sclp_running_state = sclp_running_state_idle;
+ }
+ } else {
+ __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+ sclp_request_timeout, 0);
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ sclp_process_queue();
+}
+
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
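The relocated timeout handler now takes sclp_lock before touching state, and the forced-restart path (data != 0) only acts when a request is actually running: it queues a NOP read event request to bring the interface into a defined state before marking the driver idle and reprocessing the queue. __sclp_set_request_timer() itself is unchanged; judging by the signature in the hunk header it presumably follows the classic pre-timer_setup() one-shot pattern, roughly as sketched here (not quoted from the file):

/* Sketch of the era-appropriate one-shot timer re-arm pattern; the
 * real body lives in the unchanged part of sclp.c. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);		/* drop any pending expiry */
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}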
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
if (sclp_running_state != sclp_running_state_idle)
return 0;
del_timer(&sclp_request_timer);
- rc = service_call(req->command, req->sccb);
+ rc = sclp_service_call(req->command, req->sccb);
req->start_count++;
if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
rc = __sclp_start_request(req);
if (rc == 0)
break;
- /* Request failed. */
+ /* Request failed */
+ if (req->start_count > 1) {
+ /* Cannot abort already submitted request - could still
+ * be active at the SCLP */
+ __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+ sclp_request_timeout, 0);
+ break;
+ }
+ /* Post-processing for aborted request */
list_del(&req->list);
if (req->callback) {
spin_unlock_irqrestore(&sclp_lock, flags);
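The queue processor now distinguishes two failure cases when __sclp_start_request() returns non-zero. start_count is incremented on every submission attempt, so a value above 1 means the request was already handed to the SCLP before and could still be in flight there; such a request must not be completed with an error behind the hardware's back, so the busy timer is armed instead. Only a first-time failure is aborted and reported to the callback. Reduced to its decision logic (an illustrative stand-alone model):

#include <stdio.h>

enum action { ACTION_ABORT, ACTION_WAIT_AND_RETRY };

/* Model of the new retry decision in sclp_process_queue(). */
static enum action failed_request_action(int start_count)
{
	/* A request submitted more than once may still be active at the
	 * SCLP from an earlier attempt, so it cannot be aborted. */
	return start_count > 1 ? ACTION_WAIT_AND_RETRY : ACTION_ABORT;
}

int main(void)
{
	printf("first failure:  %s\n",
	       failed_request_action(1) == ACTION_ABORT ? "abort" : "wait");
	printf("repeat failure: %s\n",
	       failed_request_action(2) == ACTION_ABORT ? "abort" : "wait");
	return 0;
}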
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
list_add_tail(&req->list, &sclp_req_queue);
rc = 0;
/* Start if request is first in list */
- if (req->list.prev == &sclp_req_queue) {
+ if (sclp_running_state == sclp_running_state_idle &&
+ req->list.prev == &sclp_req_queue) {
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
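Starting a request is now additionally gated on the running state being idle; a request added while the interface is busy (or a reset is pending) simply stays queued and is picked up later by sclp_process_queue() from the interrupt or timeout path. For context, a hypothetical submitter might look like this; the field names follow struct sclp_req as used elsewhere in the patch, while the callback and command word here are made up for illustration:

/* Hypothetical event-driver submission sketch; callback_data and the
 * callback signature are assumed from the driver's struct sclp_req. */
static void my_write_done(struct sclp_req *req, void *data)
{
	/* Runs without sclp_lock held, see sclp_process_queue(). */
}

static int submit_example(struct sclp_req *req, void *sccb)
{
	req->command = SCLP_CMDW_WRITE_EVENT_DATA;	/* assumed constant */
	req->sccb = sccb;
	req->status = SCLP_REQ_FILLED;
	req->callback = my_write_done;
	req->callback_data = NULL;
	return sclp_add_request(req);
}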
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
sccb = (struct sccb_header *) sclp_read_sccb;
clear_page(sccb);
memset(&sclp_read_req, 0, sizeof(struct sclp_req));
- sclp_read_req.command = SCLP_CMDW_READDATA;
+ sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
sclp_read_req.status = SCLP_REQ_QUEUED;
sclp_read_req.start_count = 0;
sclp_read_req.callback = sclp_read_cb;
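SCLP_CMDW_READDATA becomes SCLP_CMDW_READ_EVENT_DATA; the write-mask command is renamed the same way further down (SCLP_CMDW_WRITEMASK to SCLP_CMDW_WRITE_EVENT_MASK). The definitions live in the driver's sclp.h; from this era they are most likely the following, though the values are given for orientation only and should be checked against the real header:

/* Likely sclp.h definitions behind the renamed command words; values
 * recalled from the s390 driver of this period, shown as assumptions. */
typedef u32 sclp_cmdw_t;

#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005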
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
evbuf_pending = S390_lowcore.ext_params & 0x3;
if (finished_sccb) {
+ del_timer(&sclp_request_timer);
+ sclp_running_state = sclp_running_state_reset_pending;
req = __sclp_find_req(finished_sccb);
if (req) {
/* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
sclp_running_state = sclp_running_state_idle;
}
if (evbuf_pending && sclp_receive_mask != 0 &&
- sclp_reading_state == sclp_reading_state_idle &&
- sclp_activation_state == sclp_activation_state_active ) {
- sclp_reading_state = sclp_reading_state_reading;
- __sclp_make_read_req();
- /* Add request to head of queue */
- list_add(&sclp_read_req.list, &sclp_req_queue);
- }
+ sclp_activation_state == sclp_activation_state_active)
+ __sclp_queue_read_req();
spin_unlock(&sclp_lock);
sclp_process_queue();
}
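Two things change in the interrupt handler: the request timer is stopped and the state set to reset_pending as soon as a finished SCCB is reported, and the open-coded read-request setup is replaced by the shared __sclp_queue_read_req(), which re-checks sclp_reading_state itself, so the condition here shrinks. The ext_params masking works because SCCBs are 8-byte aligned, leaving the low three bits of the parameter word free for flags; a stand-alone illustration with a made-up value:

#include <stdio.h>
#include <stdint.h>

/* Decoding of the external-interrupt parameter word, as done at the
 * top of sclp_interrupt_handler(); the sample value is hypothetical. */
int main(void)
{
	uint32_t ext_params = 0x7f2a1003;
	uint32_t finished_sccb = ext_params & 0xfffffff8;
	uint32_t evbuf_pending = ext_params & 0x3;

	/* SCCB addresses never use bits 0-2, so they carry flags. */
	printf("sccb address: 0x%08x\n", finished_sccb);
	printf("event buffers pending: %u\n", evbuf_pending);
	return 0;
}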
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
unsigned long flags;
unsigned long cr0, cr0_sync;
u64 timeout;
+ int irq_context;
/* We'll be disabling timer interrupts, so we need a custom timeout
* mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
}
local_irq_save(flags);
/* Prevent bottom half from executing once we force interrupts open */
- local_bh_disable();
+ irq_context = in_interrupt();
+ if (!irq_context)
+ local_bh_disable();
/* Enable service-signal interruption, disable timer interrupts */
trace_hardirqs_on();
__ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
get_clock() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
- barrier();
cpu_relax();
}
local_irq_disable();
__ctl_load(cr0, 0, 0);
- _local_bh_enable();
+ if (!irq_context)
+ _local_bh_enable();
local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
-static inline void
+static void
sclp_dispatch_state_change(void)
{
struct list_head *l;
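sclp_sync_wait() may now be entered from interrupt context, where local_bh_disable()/_local_bh_enable() must not be used, so the bottom-half bracketing becomes conditional on !in_interrupt(). The dropped barrier() is redundant next to cpu_relax(), which already acts as a compiler barrier, and sclp_dispatch_state_change() simply loses its inline hint. The shape of the conditional bracketing in isolation (a sketch, busy-wait body elided):

/* Sketch of the conditional bottom-half bracketing introduced here;
 * not the full function. */
static void sync_wait_shape(void)
{
	unsigned long flags;
	int irq_context = in_interrupt();

	local_irq_save(flags);
	if (!irq_context)
		local_bh_disable();	/* keep softirqs out of the wait */

	/* ... enable service-signal interruption, busy-wait, run the
	 * expired request timer by hand if needed ... */

	local_irq_disable();
	if (!irq_context)
		_local_bh_enable();	/* decrement only; no softirq run */
	local_irq_restore(flags);
}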
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
sccb = (struct init_sccb *) sclp_init_sccb;
clear_page(sccb);
memset(&sclp_init_req, 0, sizeof(struct sclp_req));
- sclp_init_req.command = SCLP_CMDW_WRITEMASK;
+ sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
sclp_init_req.status = SCLP_REQ_FILLED;
sclp_init_req.start_count = 0;
sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
__sclp_make_init_req(0, 0);
sccb = (struct init_sccb *) sclp_init_req.sccb;
- rc = service_call(sclp_init_req.command, sccb);
+ rc = sclp_service_call(sclp_init_req.command, sccb);
if (rc == -EIO)
break;
sclp_init_req.status = SCLP_REQ_RUNNING;