1 /*
2  *  drivers/s390/cio/device.c
3  *  bus driver for ccw devices
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #define KMSG_COMPONENT "cio"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/list.h>
21 #include <linux/device.h>
22 #include <linux/workqueue.h>
23 #include <linux/timer.h>
24
25 #include <asm/ccwdev.h>
26 #include <asm/cio.h>
27 #include <asm/param.h>          /* HZ */
28 #include <asm/cmb.h>
29 #include <asm/isc.h>
30
31 #include "chp.h"
32 #include "cio.h"
33 #include "cio_debug.h"
34 #include "css.h"
35 #include "device.h"
36 #include "ioasm.h"
37 #include "io_sch.h"
38 #include "blacklist.h"
39 #include "chsc.h"
40
41 static struct timer_list recovery_timer;
42 static DEFINE_SPINLOCK(recovery_lock);
43 static int recovery_phase;
44 static const unsigned long recovery_delay[] = { 3, 30, 300 };
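/*
 * Path recovery for disconnected devices is retried on an escalating
 * schedule: recovery_phase indexes recovery_delay[], so retries happen
 * after 3, 30 and then 300 seconds, the last interval repeating while
 * devices remain disconnected (see recovery_work_func() and
 * ccw_device_schedule_recovery() below).
 */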
45
46 /******************* bus type handling ***********************/
47
48 /* The Linux driver model distinguishes between a bus type and
49  * the bus itself. Of course we only have one channel
50  * subsystem driver and one channel subsystem per machine, but
51  * we still use the abstraction. T.R. says it's a good idea. */
52 static int
53 ccw_bus_match (struct device * dev, struct device_driver * drv)
54 {
55         struct ccw_device *cdev = to_ccwdev(dev);
56         struct ccw_driver *cdrv = to_ccwdrv(drv);
57         const struct ccw_device_id *ids = cdrv->ids, *found;
58
59         if (!ids)
60                 return 0;
61
62         found = ccw_device_id_match(ids, &cdev->id);
63         if (!found)
64                 return 0;
65
66         cdev->id.driver_info = found->driver_info;
67
68         return 1;
69 }
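/*
 * For illustration only (hypothetical driver table): a ccw driver declares
 * the devices it handles via a ccw_device_id table, which ccw_bus_match()
 * consults through ccw_device_id_match(), e.g.
 *
 *	static struct ccw_device_id my_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0) },                    // any model of CU type 0x3990
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) }, // 0x3390 devices behind such a CU
 *		{ },                                          // end of list
 *	};
 *	MODULE_DEVICE_TABLE(ccw, my_ids);
 */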
70
71 /* Store the modalias string, followed by the given suffix, into a buffer of
72  * the specified size. Return the length of the resulting string (excluding the
73  * trailing '\0') even if it doesn't fit the buffer (snprintf semantics). */
74 static int snprint_alias(char *buf, size_t size,
75                          struct ccw_device_id *id, const char *suffix)
76 {
77         int len;
78
79         len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
80         if (len > size)
81                 return len;
82         buf += len;
83         size -= len;
84
85         if (id->dev_type != 0)
86                 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
87                                 id->dev_model, suffix);
88         else
89                 len += snprintf(buf, size, "dtdm%s", suffix);
90
91         return len;
92 }
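/*
 * Example (values illustrative): a device with cu_type 0x3990, cu_model 0xe9,
 * dev_type 0x3390 and dev_model 0x0a yields the alias
 * "ccw:t3990mE9dt3390dm0A"; with dev_type 0 the alias degenerates to
 * "ccw:t3990mE9dtdm".
 */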
93
94 /* Set up environment variables for ccw device uevent. Return 0 on success,
95  * non-zero otherwise. */
96 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
97 {
98         struct ccw_device *cdev = to_ccwdev(dev);
99         struct ccw_device_id *id = &(cdev->id);
100         int ret;
101         char modalias_buf[30];
102
103         /* CU_TYPE= */
104         ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
105         if (ret)
106                 return ret;
107
108         /* CU_MODEL= */
109         ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
110         if (ret)
111                 return ret;
112
113         /* The next two can be zero, that's ok for us */
114         /* DEV_TYPE= */
115         ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
116         if (ret)
117                 return ret;
118
119         /* DEV_MODEL= */
120         ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
121         if (ret)
122                 return ret;
123
124         /* MODALIAS=  */
125         snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
126         ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
127         return ret;
128 }
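/*
 * The resulting uevent environment, e.g. CU_TYPE=3990 CU_MODEL=E9
 * DEV_TYPE=3390 DEV_MODEL=0A MODALIAS=ccw:t3990mE9dt3390dm0A (values
 * illustrative), lets udev/modprobe autoload the matching driver via the
 * module aliases generated from MODULE_DEVICE_TABLE(ccw, ...).
 */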
129
130 struct bus_type ccw_bus_type;
131
132 static void io_subchannel_irq(struct subchannel *);
133 static int io_subchannel_probe(struct subchannel *);
134 static int io_subchannel_remove(struct subchannel *);
135 static void io_subchannel_shutdown(struct subchannel *);
136 static int io_subchannel_sch_event(struct subchannel *, int);
137 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
138                                    int);
139 static void recovery_func(unsigned long data);
140 wait_queue_head_t ccw_device_init_wq;
141 atomic_t ccw_device_init_count;
142
143 static struct css_device_id io_subchannel_ids[] = {
144         { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
145         { /* end of list */ },
146 };
147 MODULE_DEVICE_TABLE(css, io_subchannel_ids);
148
149 static int io_subchannel_prepare(struct subchannel *sch)
150 {
151         struct ccw_device *cdev;
152         /*
153          * Don't allow suspend while a ccw device registration
154          * is still outstanding.
155          */
156         cdev = sch_get_cdev(sch);
157         if (cdev && !device_is_registered(&cdev->dev))
158                 return -EAGAIN;
159         return 0;
160 }
161
162 static int io_subchannel_settle(void)
163 {
164         int ret;
165
166         ret = wait_event_interruptible(ccw_device_init_wq,
167                                 atomic_read(&ccw_device_init_count) == 0);
168         if (ret)
169                 return -EINTR;
170         flush_workqueue(cio_work_q);
171         return 0;
172 }
173
174 static struct css_driver io_subchannel_driver = {
175         .owner = THIS_MODULE,
176         .subchannel_type = io_subchannel_ids,
177         .name = "io_subchannel",
178         .irq = io_subchannel_irq,
179         .sch_event = io_subchannel_sch_event,
180         .chp_event = io_subchannel_chp_event,
181         .probe = io_subchannel_probe,
182         .remove = io_subchannel_remove,
183         .shutdown = io_subchannel_shutdown,
184         .prepare = io_subchannel_prepare,
185         .settle = io_subchannel_settle,
186 };
187
188 int __init io_subchannel_init(void)
189 {
190         int ret;
191
192         init_waitqueue_head(&ccw_device_init_wq);
193         atomic_set(&ccw_device_init_count, 0);
194         setup_timer(&recovery_timer, recovery_func, 0);
195
196         ret = bus_register(&ccw_bus_type);
197         if (ret)
198                 return ret;
199         ret = css_driver_register(&io_subchannel_driver);
200         if (ret)
201                 bus_unregister(&ccw_bus_type);
202
203         return ret;
204 }
205
206
207 /************************ device handling **************************/
208
209 /*
210  * A ccw_device has some interfaces in sysfs in addition to the
211  * standard ones.
212  * The following entries are designed to export the information which
213  * resided in 2.4 in /proc/subchannels. Subchannel and device number
214  * are obvious, so they don't have an entry :)
215  * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
216  */
217 static ssize_t
218 chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
219 {
220         struct subchannel *sch = to_subchannel(dev);
221         struct chsc_ssd_info *ssd = &sch->ssd_info;
222         ssize_t ret = 0;
223         int chp;
224         int mask;
225
226         for (chp = 0; chp < 8; chp++) {
227                 mask = 0x80 >> chp;
228                 if (ssd->path_mask & mask)
229                         ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
230                 else
231                         ret += sprintf(buf + ret, "00 ");
232         }
233         ret += sprintf (buf+ret, "\n");
234         return min((ssize_t)PAGE_SIZE, ret);
235 }
236
237 static ssize_t
238 pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
239 {
240         struct subchannel *sch = to_subchannel(dev);
241         struct pmcw *pmcw = &sch->schib.pmcw;
242
243         return sprintf (buf, "%02x %02x %02x\n",
244                         pmcw->pim, pmcw->pam, pmcw->pom);
245 }
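/*
 * Example sysfs output (values illustrative): "chpids" might read
 * "40 41 00 00 00 00 00 00" and "pimpampom" "c0 c0 c0", i.e. the
 * channel-path IDs followed by the path installed/available/operational
 * masks in hex.
 */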
246
247 static ssize_t
248 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
249 {
250         struct ccw_device *cdev = to_ccwdev(dev);
251         struct ccw_device_id *id = &(cdev->id);
252
253         if (id->dev_type != 0)
254                 return sprintf(buf, "%04x/%02x\n",
255                                 id->dev_type, id->dev_model);
256         else
257                 return sprintf(buf, "n/a\n");
258 }
259
260 static ssize_t
261 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
262 {
263         struct ccw_device *cdev = to_ccwdev(dev);
264         struct ccw_device_id *id = &(cdev->id);
265
266         return sprintf(buf, "%04x/%02x\n",
267                        id->cu_type, id->cu_model);
268 }
269
270 static ssize_t
271 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
272 {
273         struct ccw_device *cdev = to_ccwdev(dev);
274         struct ccw_device_id *id = &(cdev->id);
275         int len;
276
277         len = snprint_alias(buf, PAGE_SIZE, id, "\n");
278
279         return len > PAGE_SIZE ? PAGE_SIZE : len;
280 }
281
282 static ssize_t
283 online_show (struct device *dev, struct device_attribute *attr, char *buf)
284 {
285         struct ccw_device *cdev = to_ccwdev(dev);
286
287         return sprintf(buf, cdev->online ? "1\n" : "0\n");
288 }
289
290 int ccw_device_is_orphan(struct ccw_device *cdev)
291 {
292         return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
293 }
294
295 static void ccw_device_unregister(struct ccw_device *cdev)
296 {
297         if (device_is_registered(&cdev->dev)) {
298                 /* Undo device_add(). */
299                 device_del(&cdev->dev);
300         }
301         if (cdev->private->flags.initialized) {
302                 cdev->private->flags.initialized = 0;
303                 /* Release reference from device_initialize(). */
304                 put_device(&cdev->dev);
305         }
306 }
307
308 static void io_subchannel_quiesce(struct subchannel *);
309
310 /**
311  * ccw_device_set_offline() - disable a ccw device for I/O
312  * @cdev: target ccw device
313  *
314  * This function calls the driver's set_offline() function for @cdev, if
315  * given, and then disables @cdev.
316  * Returns:
317  *   %0 on success and a negative error value on failure.
318  * Context:
319  *  enabled, ccw device lock not held
320  */
321 int ccw_device_set_offline(struct ccw_device *cdev)
322 {
323         struct subchannel *sch;
324         int ret, state;
325
326         if (!cdev)
327                 return -ENODEV;
328         if (!cdev->online || !cdev->drv)
329                 return -EINVAL;
330
331         if (cdev->drv->set_offline) {
332                 ret = cdev->drv->set_offline(cdev);
333                 if (ret != 0)
334                         return ret;
335         }
336         cdev->online = 0;
337         spin_lock_irq(cdev->ccwlock);
338         sch = to_subchannel(cdev->dev.parent);
339         /* Wait until a final state or DISCONNECTED is reached */
340         while (!dev_fsm_final_state(cdev) &&
341                cdev->private->state != DEV_STATE_DISCONNECTED) {
342                 spin_unlock_irq(cdev->ccwlock);
343                 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
344                            cdev->private->state == DEV_STATE_DISCONNECTED));
345                 spin_lock_irq(cdev->ccwlock);
346         }
347         do {
348                 ret = ccw_device_offline(cdev);
349                 if (!ret)
350                         break;
351                 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
352                               "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
353                               cdev->private->dev_id.devno);
354                 if (ret != -EBUSY)
355                         goto error;
356                 state = cdev->private->state;
357                 spin_unlock_irq(cdev->ccwlock);
358                 io_subchannel_quiesce(sch);
359                 spin_lock_irq(cdev->ccwlock);
360                 cdev->private->state = state;
361         } while (ret == -EBUSY);
362         spin_unlock_irq(cdev->ccwlock);
363         wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
364                    cdev->private->state == DEV_STATE_DISCONNECTED));
365         /* Inform the user if set offline failed. */
366         if (cdev->private->state == DEV_STATE_BOXED) {
367                 pr_warning("%s: The device entered boxed state while "
368                            "being set offline\n", dev_name(&cdev->dev));
369         } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
370                 pr_warning("%s: The device stopped operating while "
371                            "being set offline\n", dev_name(&cdev->dev));
372         }
373         /* Give up reference from ccw_device_set_online(). */
374         put_device(&cdev->dev);
375         return 0;
376
377 error:
378         cdev->private->state = DEV_STATE_OFFLINE;
379         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
380         spin_unlock_irq(cdev->ccwlock);
381         /* Give up reference from ccw_device_set_online(). */
382         put_device(&cdev->dev);
383         return -ENODEV;
384 }
385
386 /**
387  * ccw_device_set_online() - enable a ccw device for I/O
388  * @cdev: target ccw device
389  *
390  * This function first enables @cdev and then calls the driver's set_online()
391  * function for @cdev, if given. If set_online() returns an error, @cdev is
392  * disabled again.
393  * Returns:
394  *   %0 on success and a negative error value on failure.
395  * Context:
396  *  enabled, ccw device lock not held
397  */
398 int ccw_device_set_online(struct ccw_device *cdev)
399 {
400         int ret;
401         int ret2;
402
403         if (!cdev)
404                 return -ENODEV;
405         if (cdev->online || !cdev->drv)
406                 return -EINVAL;
407         /* Hold on to an extra reference while device is online. */
408         if (!get_device(&cdev->dev))
409                 return -ENODEV;
410
411         spin_lock_irq(cdev->ccwlock);
412         ret = ccw_device_online(cdev);
413         spin_unlock_irq(cdev->ccwlock);
414         if (ret == 0)
415                 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
416         else {
417                 CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
418                               "device 0.%x.%04x\n",
419                               ret, cdev->private->dev_id.ssid,
420                               cdev->private->dev_id.devno);
421                 /* Give up online reference since onlining failed. */
422                 put_device(&cdev->dev);
423                 return ret;
424         }
425         spin_lock_irq(cdev->ccwlock);
426         /* Check if online processing was successful */
427         if ((cdev->private->state != DEV_STATE_ONLINE) &&
428             (cdev->private->state != DEV_STATE_W4SENSE)) {
429                 spin_unlock_irq(cdev->ccwlock);
430                 /* Inform the user that set online failed. */
431                 if (cdev->private->state == DEV_STATE_BOXED) {
432                         pr_warning("%s: Setting the device online failed "
433                                    "because it is boxed\n",
434                                    dev_name(&cdev->dev));
435                 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
436                         pr_warning("%s: Setting the device online failed "
437                                    "because it is not operational\n",
438                                    dev_name(&cdev->dev));
439                 }
440                 /* Give up online reference since onlining failed. */
441                 put_device(&cdev->dev);
442                 return -ENODEV;
443         }
444         spin_unlock_irq(cdev->ccwlock);
445         if (cdev->drv->set_online)
446                 ret = cdev->drv->set_online(cdev);
447         if (ret)
448                 goto rollback;
449         cdev->online = 1;
450         return 0;
451
452 rollback:
453         spin_lock_irq(cdev->ccwlock);
454         /* Wait until a final state or DISCONNECTED is reached */
455         while (!dev_fsm_final_state(cdev) &&
456                cdev->private->state != DEV_STATE_DISCONNECTED) {
457                 spin_unlock_irq(cdev->ccwlock);
458                 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
459                            cdev->private->state == DEV_STATE_DISCONNECTED));
460                 spin_lock_irq(cdev->ccwlock);
461         }
462         ret2 = ccw_device_offline(cdev);
463         if (ret2)
464                 goto error;
465         spin_unlock_irq(cdev->ccwlock);
466         wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
467                    cdev->private->state == DEV_STATE_DISCONNECTED));
468         /* Give up online reference since onlining failed. */
469         put_device(&cdev->dev);
470         return ret;
471
472 error:
473         CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
474                       "device 0.%x.%04x\n",
475                       ret2, cdev->private->dev_id.ssid,
476                       cdev->private->dev_id.devno);
477         cdev->private->state = DEV_STATE_OFFLINE;
478         spin_unlock_irq(cdev->ccwlock);
479         /* Give up online reference since onlining failed. */
480         put_device(&cdev->dev);
481         return ret;
482 }
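/*
 * Reference counting note: ccw_device_set_online() holds an extra device
 * reference for as long as the device is online; it is dropped here if
 * onlining fails and otherwise by ccw_device_set_offline().
 */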
483
484 static int online_store_handle_offline(struct ccw_device *cdev)
485 {
486         if (cdev->private->state == DEV_STATE_DISCONNECTED) {
487                 spin_lock_irq(cdev->ccwlock);
488                 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
489                 spin_unlock_irq(cdev->ccwlock);
490                 return 0;
491         }
492         if (cdev->drv && cdev->drv->set_offline)
493                 return ccw_device_set_offline(cdev);
494         return -EINVAL;
495 }
496
497 static int online_store_recog_and_online(struct ccw_device *cdev)
498 {
499         /* Do device recognition, if needed. */
500         if (cdev->private->state == DEV_STATE_BOXED) {
501                 spin_lock_irq(cdev->ccwlock);
502                 ccw_device_recognition(cdev);
503                 spin_unlock_irq(cdev->ccwlock);
504                 wait_event(cdev->private->wait_q,
505                            cdev->private->flags.recog_done);
506                 if (cdev->private->state != DEV_STATE_OFFLINE)
507                         /* recognition failed */
508                         return -EAGAIN;
509         }
510         if (cdev->drv && cdev->drv->set_online)
511                 return ccw_device_set_online(cdev);
512         return -EINVAL;
513 }
514
515 static int online_store_handle_online(struct ccw_device *cdev, int force)
516 {
517         int ret;
518
519         ret = online_store_recog_and_online(cdev);
520         if (ret && !force)
521                 return ret;
522         if (force && cdev->private->state == DEV_STATE_BOXED) {
523                 ret = ccw_device_stlck(cdev);
524                 if (ret)
525                         return ret;
526                 if (cdev->id.cu_type == 0)
527                         cdev->private->state = DEV_STATE_NOT_OPER;
528                 ret = online_store_recog_and_online(cdev);
529                 if (ret)
530                         return ret;
531         }
532         return 0;
533 }
534
535 static ssize_t online_store (struct device *dev, struct device_attribute *attr,
536                              const char *buf, size_t count)
537 {
538         struct ccw_device *cdev = to_ccwdev(dev);
539         int force, ret;
540         unsigned long i;
541
542         if (!dev_fsm_final_state(cdev) &&
543             cdev->private->state != DEV_STATE_DISCONNECTED)
544                 return -EAGAIN;
545         if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
546                 return -EAGAIN;
547
548         if (cdev->drv && !try_module_get(cdev->drv->owner)) {
549                 atomic_set(&cdev->private->onoff, 0);
550                 return -EINVAL;
551         }
552         if (!strncmp(buf, "force\n", count)) {
553                 force = 1;
554                 i = 1;
555                 ret = 0;
556         } else {
557                 force = 0;
558                 ret = strict_strtoul(buf, 16, &i);
559         }
560         if (ret)
561                 goto out;
562         switch (i) {
563         case 0:
564                 ret = online_store_handle_offline(cdev);
565                 break;
566         case 1:
567                 ret = online_store_handle_online(cdev, force);
568                 break;
569         default:
570                 ret = -EINVAL;
571         }
572 out:
573         if (cdev->drv)
574                 module_put(cdev->drv->owner);
575         atomic_set(&cdev->private->onoff, 0);
576         return (ret < 0) ? ret : count;
577 }
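/*
 * From user space this attribute is written via sysfs, e.g. (bus-ID
 * 0.0.1234 chosen for illustration):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online        # set online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online        # set offline
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online    # steal the lock of a boxed device, then online
 */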
578
579 static ssize_t
580 available_show (struct device *dev, struct device_attribute *attr, char *buf)
581 {
582         struct ccw_device *cdev = to_ccwdev(dev);
583         struct subchannel *sch;
584
585         if (ccw_device_is_orphan(cdev))
586                 return sprintf(buf, "no device\n");
587         switch (cdev->private->state) {
588         case DEV_STATE_BOXED:
589                 return sprintf(buf, "boxed\n");
590         case DEV_STATE_DISCONNECTED:
591         case DEV_STATE_DISCONNECTED_SENSE_ID:
592         case DEV_STATE_NOT_OPER:
593                 sch = to_subchannel(dev->parent);
594                 if (!sch->lpm)
595                         return sprintf(buf, "no path\n");
596                 else
597                         return sprintf(buf, "no device\n");
598         default:
599                 /* All other states considered fine. */
600                 return sprintf(buf, "good\n");
601         }
602 }
603
604 static ssize_t
605 initiate_logging(struct device *dev, struct device_attribute *attr,
606                  const char *buf, size_t count)
607 {
608         struct subchannel *sch = to_subchannel(dev);
609         int rc;
610
611         rc = chsc_siosl(sch->schid);
612         if (rc < 0) {
613                 pr_warning("Logging for subchannel 0.%x.%04x failed with "
614                            "errno=%d\n",
615                            sch->schid.ssid, sch->schid.sch_no, rc);
616                 return rc;
617         }
618         pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
619                   sch->schid.ssid, sch->schid.sch_no);
620         return count;
621 }
622
623 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
624 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
625 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
626 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
627 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
628 static DEVICE_ATTR(online, 0644, online_show, online_store);
629 static DEVICE_ATTR(availability, 0444, available_show, NULL);
630 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
631
632 static struct attribute *io_subchannel_attrs[] = {
633         &dev_attr_chpids.attr,
634         &dev_attr_pimpampom.attr,
635         &dev_attr_logging.attr,
636         NULL,
637 };
638
639 static struct attribute_group io_subchannel_attr_group = {
640         .attrs = io_subchannel_attrs,
641 };
642
643 static struct attribute * ccwdev_attrs[] = {
644         &dev_attr_devtype.attr,
645         &dev_attr_cutype.attr,
646         &dev_attr_modalias.attr,
647         &dev_attr_online.attr,
648         &dev_attr_cmb_enable.attr,
649         &dev_attr_availability.attr,
650         NULL,
651 };
652
653 static struct attribute_group ccwdev_attr_group = {
654         .attrs = ccwdev_attrs,
655 };
656
657 static const struct attribute_group *ccwdev_attr_groups[] = {
658         &ccwdev_attr_group,
659         NULL,
660 };
661
662 /* this is a simple abstraction for device_register that sets the
663  * correct bus type and adds the bus specific files */
664 static int ccw_device_register(struct ccw_device *cdev)
665 {
666         struct device *dev = &cdev->dev;
667         int ret;
668
669         dev->bus = &ccw_bus_type;
670         ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
671                            cdev->private->dev_id.devno);
672         if (ret)
673                 return ret;
674         return device_add(dev);
675 }
676
677 static int match_dev_id(struct device *dev, void *data)
678 {
679         struct ccw_device *cdev = to_ccwdev(dev);
680         struct ccw_dev_id *dev_id = data;
681
682         return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
683 }
684
685 static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
686 {
687         struct device *dev;
688
689         dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
690
691         return dev ? to_ccwdev(dev) : NULL;
692 }
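/*
 * Note: bus_find_device() returns the matching device with an additional
 * reference held, so callers of get_ccwdev_by_dev_id() must drop it with
 * put_device() when done (see io_subchannel_sch_event() below).
 */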
693
694 static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
695 {
696         int ret;
697
698         if (device_is_registered(&cdev->dev)) {
699                 device_release_driver(&cdev->dev);
700                 ret = device_attach(&cdev->dev);
701                 WARN_ON(ret == -ENODEV);
702         }
703 }
704
705 static void
706 ccw_device_release(struct device *dev)
707 {
708         struct ccw_device *cdev;
709
710         cdev = to_ccwdev(dev);
711         /* Release reference of parent subchannel. */
712         put_device(cdev->dev.parent);
713         kfree(cdev->private);
714         kfree(cdev);
715 }
716
717 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
718 {
719         struct ccw_device *cdev;
720
721         cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
722         if (cdev) {
723                 cdev->private = kzalloc(sizeof(struct ccw_device_private),
724                                         GFP_KERNEL | GFP_DMA);
725                 if (cdev->private)
726                         return cdev;
727         }
728         kfree(cdev);
729         return ERR_PTR(-ENOMEM);
730 }
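/*
 * cdev->private is allocated GFP_DMA: on s390 this keeps it below 2 GB so
 * that the channel-program data embedded in it (such as the sense-ID CCWs
 * and data) remains 31-bit addressable for the channel subsystem.
 */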
731
732 static void ccw_device_todo(struct work_struct *work);
733
734 static int io_subchannel_initialize_dev(struct subchannel *sch,
735                                         struct ccw_device *cdev)
736 {
737         cdev->private->cdev = cdev;
738         atomic_set(&cdev->private->onoff, 0);
739         cdev->dev.parent = &sch->dev;
740         cdev->dev.release = ccw_device_release;
741         INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
742         cdev->dev.groups = ccwdev_attr_groups;
743         /* Do first half of device_register. */
744         device_initialize(&cdev->dev);
745         if (!get_device(&sch->dev)) {
746                 /* Release reference from device_initialize(). */
747                 put_device(&cdev->dev);
748                 return -ENODEV;
749         }
750         cdev->private->flags.initialized = 1;
751         return 0;
752 }
753
754 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
755 {
756         struct ccw_device *cdev;
757         int ret;
758
759         cdev = io_subchannel_allocate_dev(sch);
760         if (!IS_ERR(cdev)) {
761                 ret = io_subchannel_initialize_dev(sch, cdev);
762                 if (ret)
763                         cdev = ERR_PTR(ret);
764         }
765         return cdev;
766 }
767
768 static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
769
770 static void sch_create_and_recog_new_device(struct subchannel *sch)
771 {
772         struct ccw_device *cdev;
773
774         /* Need to allocate a new ccw device. */
775         cdev = io_subchannel_create_ccwdev(sch);
776         if (IS_ERR(cdev)) {
777                 /* OK, we did everything we could... */
778                 css_sch_device_unregister(sch);
779                 return;
780         }
781         /* Start recognition for the new ccw device. */
782         io_subchannel_recog(cdev, sch);
783 }
784
785 /*
786  * Register recognized device.
787  */
788 static void io_subchannel_register(struct ccw_device *cdev)
789 {
790         struct subchannel *sch;
791         int ret, adjust_init_count = 1;
792         unsigned long flags;
793
794         sch = to_subchannel(cdev->dev.parent);
795         /*
796          * Check if subchannel is still registered. It may have become
797          * unregistered if a machine check hit us after finishing
798          * device recognition but before the register work could be
799          * queued.
800          */
801         if (!device_is_registered(&sch->dev))
802                 goto out_err;
803         css_update_ssd_info(sch);
804         /*
805          * io_subchannel_register() will also be called after device
806          * recognition has been done for a boxed device (which will already
807          * be registered). We need to reprobe since we may now have sense id
808          * information.
809          */
810         if (device_is_registered(&cdev->dev)) {
811                 if (!cdev->drv) {
812                         ret = device_reprobe(&cdev->dev);
813                         if (ret)
814                                 /* We can't do much here. */
815                                 CIO_MSG_EVENT(0, "device_reprobe() returned"
816                                               " %d for 0.%x.%04x\n", ret,
817                                               cdev->private->dev_id.ssid,
818                                               cdev->private->dev_id.devno);
819                 }
820                 adjust_init_count = 0;
821                 goto out;
822         }
823         /*
824          * Now we know this subchannel will stay, we can throw
825          * our delayed uevent.
826          */
827         dev_set_uevent_suppress(&sch->dev, 0);
828         kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
829         /* make it known to the system */
830         ret = ccw_device_register(cdev);
831         if (ret) {
832                 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
833                               cdev->private->dev_id.ssid,
834                               cdev->private->dev_id.devno, ret);
835                 spin_lock_irqsave(sch->lock, flags);
836                 sch_set_cdev(sch, NULL);
837                 spin_unlock_irqrestore(sch->lock, flags);
838                 /* Release initial device reference. */
839                 put_device(&cdev->dev);
840                 goto out_err;
841         }
842 out:
843         cdev->private->flags.recog_done = 1;
844         wake_up(&cdev->private->wait_q);
845 out_err:
846         if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
847                 wake_up(&ccw_device_init_wq);
848 }
849
850 static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
851 {
852         struct subchannel *sch;
853
854         /* Get subchannel reference for local processing. */
855         if (!get_device(cdev->dev.parent))
856                 return;
857         sch = to_subchannel(cdev->dev.parent);
858         css_sch_device_unregister(sch);
859         /* Release subchannel reference for local processing. */
860         put_device(&sch->dev);
861 }
862
863 /*
864  * subchannel recognition done. Called from the state machine.
865  */
866 void
867 io_subchannel_recog_done(struct ccw_device *cdev)
868 {
869         if (css_init_done == 0) {
870                 cdev->private->flags.recog_done = 1;
871                 return;
872         }
873         switch (cdev->private->state) {
874         case DEV_STATE_BOXED:
875                 /* Device did not respond in time. */
876         case DEV_STATE_NOT_OPER:
877                 cdev->private->flags.recog_done = 1;
878                 /* Remove device found not operational. */
879                 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
880                 if (atomic_dec_and_test(&ccw_device_init_count))
881                         wake_up(&ccw_device_init_wq);
882                 break;
883         case DEV_STATE_OFFLINE:
884                 /*
885                  * We can't register the device in interrupt context so
886                  * we schedule a work item.
887                  */
888                 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
889                 break;
890         }
891 }
892
893 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
894 {
895         struct ccw_device_private *priv;
896
897         cdev->ccwlock = sch->lock;
898
899         /* Init private data. */
900         priv = cdev->private;
901         priv->dev_id.devno = sch->schib.pmcw.dev;
902         priv->dev_id.ssid = sch->schid.ssid;
903         priv->schid = sch->schid;
904         priv->state = DEV_STATE_NOT_OPER;
905         INIT_LIST_HEAD(&priv->cmb_list);
906         init_waitqueue_head(&priv->wait_q);
907         init_timer(&priv->timer);
908
909         /* Increase counter of devices currently in recognition. */
910         atomic_inc(&ccw_device_init_count);
911
912         /* Start async. device sensing. */
913         spin_lock_irq(sch->lock);
914         sch_set_cdev(sch, cdev);
915         ccw_device_recognition(cdev);
916         spin_unlock_irq(sch->lock);
917 }
918
919 static int ccw_device_move_to_sch(struct ccw_device *cdev,
920                                   struct subchannel *sch)
921 {
922         struct subchannel *old_sch;
923         int rc, old_enabled = 0;
924
925         old_sch = to_subchannel(cdev->dev.parent);
926         /* Obtain child reference for new parent. */
927         if (!get_device(&sch->dev))
928                 return -ENODEV;
929
930         if (!sch_is_pseudo_sch(old_sch)) {
931                 spin_lock_irq(old_sch->lock);
932                 old_enabled = old_sch->schib.pmcw.ena;
933                 rc = 0;
934                 if (old_enabled)
935                         rc = cio_disable_subchannel(old_sch);
936                 spin_unlock_irq(old_sch->lock);
937                 if (rc == -EBUSY) {
938                         /* Release child reference for new parent. */
939                         put_device(&sch->dev);
940                         return rc;
941                 }
942         }
943
944         mutex_lock(&sch->reg_mutex);
945         rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
946         mutex_unlock(&sch->reg_mutex);
947         if (rc) {
948                 CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
949                               cdev->private->dev_id.ssid,
950                               cdev->private->dev_id.devno, sch->schid.ssid,
951                               sch->schib.pmcw.dev, rc);
952                 if (old_enabled) {
953                         /* Try to reenable the old subchannel. */
954                         spin_lock_irq(old_sch->lock);
955                         cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
956                         spin_unlock_irq(old_sch->lock);
957                 }
958                 /* Release child reference for new parent. */
959                 put_device(&sch->dev);
960                 return rc;
961         }
962         /* Clean up old subchannel. */
963         if (!sch_is_pseudo_sch(old_sch)) {
964                 spin_lock_irq(old_sch->lock);
965                 sch_set_cdev(old_sch, NULL);
966                 spin_unlock_irq(old_sch->lock);
967                 css_schedule_eval(old_sch->schid);
968         }
969         /* Release child reference for old parent. */
970         put_device(&old_sch->dev);
971         /* Initialize new subchannel. */
972         spin_lock_irq(sch->lock);
973         cdev->private->schid = sch->schid;
974         cdev->ccwlock = sch->lock;
975         if (!sch_is_pseudo_sch(sch))
976                 sch_set_cdev(sch, cdev);
977         spin_unlock_irq(sch->lock);
978         if (!sch_is_pseudo_sch(sch))
979                 css_update_ssd_info(sch);
980         return 0;
981 }
982
983 static int ccw_device_move_to_orph(struct ccw_device *cdev)
984 {
985         struct subchannel *sch = to_subchannel(cdev->dev.parent);
986         struct channel_subsystem *css = to_css(sch->dev.parent);
987
988         return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
989 }
990
991 static void io_subchannel_irq(struct subchannel *sch)
992 {
993         struct ccw_device *cdev;
994
995         cdev = sch_get_cdev(sch);
996
997         CIO_TRACE_EVENT(6, "IRQ");
998         CIO_TRACE_EVENT(6, dev_name(&sch->dev));
999         if (cdev)
1000                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1001 }
1002
1003 void io_subchannel_init_config(struct subchannel *sch)
1004 {
1005         memset(&sch->config, 0, sizeof(sch->config));
1006         sch->config.csense = 1;
1007 }
1008
1009 static void io_subchannel_init_fields(struct subchannel *sch)
1010 {
1011         if (cio_is_console(sch->schid))
1012                 sch->opm = 0xff;
1013         else
1014                 sch->opm = chp_get_sch_opm(sch);
1015         sch->lpm = sch->schib.pmcw.pam & sch->opm;
1016         sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1017
1018         CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1019                       " - PIM = %02X, PAM = %02X, POM = %02X\n",
1020                       sch->schib.pmcw.dev, sch->schid.ssid,
1021                       sch->schid.sch_no, sch->schib.pmcw.pim,
1022                       sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1023
1024         io_subchannel_init_config(sch);
1025 }
1026
1027 /*
1028  * Note: We always return 0 so that we bind to the device even on error.
1029  * This is needed so that our remove function is called on unregister.
1030  */
1031 static int io_subchannel_probe(struct subchannel *sch)
1032 {
1033         struct ccw_device *cdev;
1034         int rc;
1035
1036         if (cio_is_console(sch->schid)) {
1037                 rc = sysfs_create_group(&sch->dev.kobj,
1038                                         &io_subchannel_attr_group);
1039                 if (rc)
1040                         CIO_MSG_EVENT(0, "Failed to create io subchannel "
1041                                       "attributes for subchannel "
1042                                       "0.%x.%04x (rc=%d)\n",
1043                                       sch->schid.ssid, sch->schid.sch_no, rc);
1044                 /*
1045                  * The console subchannel already has an associated ccw_device.
1046                  * Throw the delayed uevent for the subchannel, register
1047                  * the ccw_device and exit.
1048                  */
1049                 dev_set_uevent_suppress(&sch->dev, 0);
1050                 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1051                 cdev = sch_get_cdev(sch);
1052                 cdev->dev.groups = ccwdev_attr_groups;
1053                 device_initialize(&cdev->dev);
1054                 cdev->private->flags.initialized = 1;
1055                 ccw_device_register(cdev);
1056                 /*
1057                  * Check if the device is already online. If it is
1058                  * the reference count needs to be corrected since we
1059                  * didn't obtain a reference in ccw_device_set_online.
1060                  */
1061                 if (cdev->private->state != DEV_STATE_NOT_OPER &&
1062                     cdev->private->state != DEV_STATE_OFFLINE &&
1063                     cdev->private->state != DEV_STATE_BOXED)
1064                         get_device(&cdev->dev);
1065                 return 0;
1066         }
1067         io_subchannel_init_fields(sch);
1068         rc = cio_commit_config(sch);
1069         if (rc)
1070                 goto out_schedule;
1071         rc = sysfs_create_group(&sch->dev.kobj,
1072                                 &io_subchannel_attr_group);
1073         if (rc)
1074                 goto out_schedule;
1075         /* Allocate I/O subchannel private data. */
1076         sch->private = kzalloc(sizeof(struct io_subchannel_private),
1077                                GFP_KERNEL | GFP_DMA);
1078         if (!sch->private)
1079                 goto out_schedule;
1080         css_schedule_eval(sch->schid);
1081         return 0;
1082
1083 out_schedule:
1084         spin_lock_irq(sch->lock);
1085         css_sched_sch_todo(sch, SCH_TODO_UNREG);
1086         spin_unlock_irq(sch->lock);
1087         return 0;
1088 }
1089
1090 static int
1091 io_subchannel_remove (struct subchannel *sch)
1092 {
1093         struct ccw_device *cdev;
1094
1095         cdev = sch_get_cdev(sch);
1096         if (!cdev)
1097                 goto out_free;
1098         io_subchannel_quiesce(sch);
1099         /* Set ccw device to not operational and drop reference. */
1100         spin_lock_irq(cdev->ccwlock);
1101         sch_set_cdev(sch, NULL);
1102         cdev->private->state = DEV_STATE_NOT_OPER;
1103         spin_unlock_irq(cdev->ccwlock);
1104         ccw_device_unregister(cdev);
1105 out_free:
1106         kfree(sch->private);
1107         sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1108         return 0;
1109 }
1110
1111 static void io_subchannel_verify(struct subchannel *sch)
1112 {
1113         struct ccw_device *cdev;
1114
1115         cdev = sch_get_cdev(sch);
1116         if (cdev)
1117                 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1118 }
1119
1120 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1121 {
1122         struct ccw_device *cdev;
1123
1124         cdev = sch_get_cdev(sch);
1125         if (!cdev)
1126                 return;
1127         if (cio_update_schib(sch))
1128                 goto err;
1129         /* Check for I/O on path. */
1130         if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1131                 goto out;
1132         if (cdev->private->state == DEV_STATE_ONLINE) {
1133                 ccw_device_kill_io(cdev);
1134                 goto out;
1135         }
1136         if (cio_clear(sch))
1137                 goto err;
1138 out:
1139         /* Trigger path verification. */
1140         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1141         return;
1142
1143 err:
1144         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1145 }
1146
1147 static int io_subchannel_chp_event(struct subchannel *sch,
1148                                    struct chp_link *link, int event)
1149 {
1150         struct ccw_device *cdev = sch_get_cdev(sch);
1151         int mask;
1152
1153         mask = chp_ssd_get_mask(&sch->ssd_info, link);
1154         if (!mask)
1155                 return 0;
1156         switch (event) {
1157         case CHP_VARY_OFF:
1158                 sch->opm &= ~mask;
1159                 sch->lpm &= ~mask;
1160                 if (cdev)
1161                         cdev->private->path_gone_mask |= mask;
1162                 io_subchannel_terminate_path(sch, mask);
1163                 break;
1164         case CHP_VARY_ON:
1165                 sch->opm |= mask;
1166                 sch->lpm |= mask;
1167                 if (cdev)
1168                         cdev->private->path_new_mask |= mask;
1169                 io_subchannel_verify(sch);
1170                 break;
1171         case CHP_OFFLINE:
1172                 if (cio_update_schib(sch))
1173                         return -ENODEV;
1174                 if (cdev)
1175                         cdev->private->path_gone_mask |= mask;
1176                 io_subchannel_terminate_path(sch, mask);
1177                 break;
1178         case CHP_ONLINE:
1179                 if (cio_update_schib(sch))
1180                         return -ENODEV;
1181                 sch->lpm |= mask & sch->opm;
1182                 if (cdev)
1183                         cdev->private->path_new_mask |= mask;
1184                 io_subchannel_verify(sch);
1185                 break;
1186         }
1187         return 0;
1188 }
1189
1190 static void io_subchannel_quiesce(struct subchannel *sch)
1191 {
1192         struct ccw_device *cdev;
1193         int ret;
1194
1195         spin_lock_irq(sch->lock);
1196         cdev = sch_get_cdev(sch);
1197         if (cio_is_console(sch->schid))
1198                 goto out_unlock;
1199         if (!sch->schib.pmcw.ena)
1200                 goto out_unlock;
1201         ret = cio_disable_subchannel(sch);
1202         if (ret != -EBUSY)
1203                 goto out_unlock;
1204         if (cdev->handler)
1205                 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1206         while (ret == -EBUSY) {
1207                 cdev->private->state = DEV_STATE_QUIESCE;
1208                 ret = ccw_device_cancel_halt_clear(cdev);
1209                 if (ret == -EBUSY) {
1210                         ccw_device_set_timeout(cdev, HZ/10);
1211                         spin_unlock_irq(sch->lock);
1212                         wait_event(cdev->private->wait_q,
1213                                    cdev->private->state != DEV_STATE_QUIESCE);
1214                         spin_lock_irq(sch->lock);
1215                 }
1216                 ret = cio_disable_subchannel(sch);
1217         }
1218 out_unlock:
1219         spin_unlock_irq(sch->lock);
1220 }
1221
1222 static void io_subchannel_shutdown(struct subchannel *sch)
1223 {
1224         io_subchannel_quiesce(sch);
1225 }
1226
1227 static int device_is_disconnected(struct ccw_device *cdev)
1228 {
1229         if (!cdev)
1230                 return 0;
1231         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1232                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1233 }
1234
1235 static int recovery_check(struct device *dev, void *data)
1236 {
1237         struct ccw_device *cdev = to_ccwdev(dev);
1238         int *redo = data;
1239
1240         spin_lock_irq(cdev->ccwlock);
1241         switch (cdev->private->state) {
1242         case DEV_STATE_DISCONNECTED:
1243                 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1244                               cdev->private->dev_id.ssid,
1245                               cdev->private->dev_id.devno);
1246                 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1247                 *redo = 1;
1248                 break;
1249         case DEV_STATE_DISCONNECTED_SENSE_ID:
1250                 *redo = 1;
1251                 break;
1252         }
1253         spin_unlock_irq(cdev->ccwlock);
1254
1255         return 0;
1256 }
1257
1258 static void recovery_work_func(struct work_struct *unused)
1259 {
1260         int redo = 0;
1261
1262         bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1263         if (redo) {
1264                 spin_lock_irq(&recovery_lock);
1265                 if (!timer_pending(&recovery_timer)) {
1266                         if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1267                                 recovery_phase++;
1268                         mod_timer(&recovery_timer, jiffies +
1269                                   recovery_delay[recovery_phase] * HZ);
1270                 }
1271                 spin_unlock_irq(&recovery_lock);
1272         } else
1273                 CIO_MSG_EVENT(4, "recovery: end\n");
1274 }
1275
1276 static DECLARE_WORK(recovery_work, recovery_work_func);
1277
1278 static void recovery_func(unsigned long data)
1279 {
1280         /*
1281          * We can't do our recovery in softirq context and it's not
1282          * performance critical, so we schedule it.
1283          */
1284         schedule_work(&recovery_work);
1285 }
1286
1287 static void ccw_device_schedule_recovery(void)
1288 {
1289         unsigned long flags;
1290
1291         CIO_MSG_EVENT(4, "recovery: schedule\n");
1292         spin_lock_irqsave(&recovery_lock, flags);
1293         if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1294                 recovery_phase = 0;
1295                 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1296         }
1297         spin_unlock_irqrestore(&recovery_lock, flags);
1298 }
1299
1300 static int purge_fn(struct device *dev, void *data)
1301 {
1302         struct ccw_device *cdev = to_ccwdev(dev);
1303         struct ccw_dev_id *id = &cdev->private->dev_id;
1304
1305         spin_lock_irq(cdev->ccwlock);
1306         if (is_blacklisted(id->ssid, id->devno) &&
1307             (cdev->private->state == DEV_STATE_OFFLINE)) {
1308                 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1309                               id->devno);
1310                 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1311         }
1312         spin_unlock_irq(cdev->ccwlock);
1313         /* Abort loop in case of pending signal. */
1314         if (signal_pending(current))
1315                 return -EINTR;
1316
1317         return 0;
1318 }
1319
1320 /**
1321  * ccw_purge_blacklisted - purge unused, blacklisted devices
1322  *
1323  * Unregister all ccw devices that are offline and on the blacklist.
1324  */
1325 int ccw_purge_blacklisted(void)
1326 {
1327         CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1328         bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1329         return 0;
1330 }
1331
1332 void ccw_device_set_disconnected(struct ccw_device *cdev)
1333 {
1334         if (!cdev)
1335                 return;
1336         ccw_device_set_timeout(cdev, 0);
1337         cdev->private->flags.fake_irb = 0;
1338         cdev->private->state = DEV_STATE_DISCONNECTED;
1339         if (cdev->online)
1340                 ccw_device_schedule_recovery();
1341 }
1342
1343 void ccw_device_set_notoper(struct ccw_device *cdev)
1344 {
1345         struct subchannel *sch = to_subchannel(cdev->dev.parent);
1346
1347         CIO_TRACE_EVENT(2, "notoper");
1348         CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1349         ccw_device_set_timeout(cdev, 0);
1350         cio_disable_subchannel(sch);
1351         cdev->private->state = DEV_STATE_NOT_OPER;
1352 }
1353
1354 enum io_sch_action {
1355         IO_SCH_UNREG,
1356         IO_SCH_ORPH_UNREG,
1357         IO_SCH_ATTACH,
1358         IO_SCH_UNREG_ATTACH,
1359         IO_SCH_ORPH_ATTACH,
1360         IO_SCH_REPROBE,
1361         IO_SCH_VERIFY,
1362         IO_SCH_DISC,
1363         IO_SCH_NOP,
1364 };
1365
1366 static enum io_sch_action sch_get_action(struct subchannel *sch)
1367 {
1368         struct ccw_device *cdev;
1369
1370         cdev = sch_get_cdev(sch);
1371         if (cio_update_schib(sch)) {
1372                 /* Not operational. */
1373                 if (!cdev)
1374                         return IO_SCH_UNREG;
1375                 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1376                         return IO_SCH_UNREG;
1377                 return IO_SCH_ORPH_UNREG;
1378         }
1379         /* Operational. */
1380         if (!cdev)
1381                 return IO_SCH_ATTACH;
1382         if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1383                 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1384                         return IO_SCH_UNREG_ATTACH;
1385                 return IO_SCH_ORPH_ATTACH;
1386         }
1387         if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1388                 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1389                         return IO_SCH_UNREG;
1390                 return IO_SCH_DISC;
1391         }
1392         if (device_is_disconnected(cdev))
1393                 return IO_SCH_REPROBE;
1394         if (cdev->online)
1395                 return IO_SCH_VERIFY;
1396         return IO_SCH_NOP;
1397 }
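/*
 * Summary of sch_get_action(): an inoperative subchannel leads to
 * unregistration (via the orphanage if the driver wants to keep the
 * device), a device-number mismatch to re-attach, a path-less but
 * operational subchannel to disconnect, and a disconnected device to
 * reprobe; otherwise an online device gets path verification and an
 * offline one is left alone.
 */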
1398
1399 /**
1400  * io_subchannel_sch_event - process subchannel event
1401  * @sch: subchannel
1402  * @process: non-zero if function is called in process context
1403  *
1404  * An unspecified event occurred for this subchannel. Adjust data according
1405  * to the current operational state of the subchannel and device. Return
1406  * zero when the event has been handled sufficiently or -EAGAIN when this
1407  * function should be called again in process context.
1408  */
1409 static int io_subchannel_sch_event(struct subchannel *sch, int process)
1410 {
1411         unsigned long flags;
1412         struct ccw_device *cdev;
1413         struct ccw_dev_id dev_id;
1414         enum io_sch_action action;
1415         int rc = -EAGAIN;
1416
1417         spin_lock_irqsave(sch->lock, flags);
1418         if (!device_is_registered(&sch->dev))
1419                 goto out_unlock;
1420         if (work_pending(&sch->todo_work))
1421                 goto out_unlock;
1422         cdev = sch_get_cdev(sch);
1423         if (cdev && work_pending(&cdev->private->todo_work))
1424                 goto out_unlock;
1425         action = sch_get_action(sch);
1426         CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1427                       sch->schid.ssid, sch->schid.sch_no, process,
1428                       action);
1429         /* Perform immediate actions while holding the lock. */
1430         switch (action) {
1431         case IO_SCH_REPROBE:
1432                 /* Trigger device recognition. */
1433                 ccw_device_trigger_reprobe(cdev);
1434                 rc = 0;
1435                 goto out_unlock;
1436         case IO_SCH_VERIFY:
1437                 if (cdev->private->flags.resuming == 1) {
1438                         if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
1439                                 ccw_device_set_notoper(cdev);
1440                                 break;
1441                         }
1442                 }
1443                 /* Trigger path verification. */
1444                 io_subchannel_verify(sch);
1445                 rc = 0;
1446                 goto out_unlock;
1447         case IO_SCH_DISC:
1448                 ccw_device_set_disconnected(cdev);
1449                 rc = 0;
1450                 goto out_unlock;
1451         case IO_SCH_ORPH_UNREG:
1452         case IO_SCH_ORPH_ATTACH:
1453                 ccw_device_set_disconnected(cdev);
1454                 break;
1455         case IO_SCH_UNREG_ATTACH:
1456         case IO_SCH_UNREG:
1457                 if (cdev)
1458                         ccw_device_set_notoper(cdev);
1459                 break;
1460         case IO_SCH_NOP:
1461                 rc = 0;
1462                 goto out_unlock;
1463         default:
1464                 break;
1465         }
1466         spin_unlock_irqrestore(sch->lock, flags);
1467         /* All other actions require process context. */
1468         if (!process)
1469                 goto out;
1470         /* Handle attached ccw device. */
1471         switch (action) {
1472         case IO_SCH_ORPH_UNREG:
1473         case IO_SCH_ORPH_ATTACH:
1474                 /* Move ccw device to orphanage. */
1475                 rc = ccw_device_move_to_orph(cdev);
1476                 if (rc)
1477                         goto out;
1478                 break;
1479         case IO_SCH_UNREG_ATTACH:
1480                 if (cdev->private->flags.resuming) {
1481                         /* Device will be handled later. */
1482                         rc = 0;
1483                         goto out;
1484                 }
1485                 /* Unregister ccw device. */
1486                 ccw_device_unregister(cdev);
1487                 break;
1488         default:
1489                 break;
1490         }
1491         /* Handle subchannel. */
1492         switch (action) {
1493         case IO_SCH_ORPH_UNREG:
1494         case IO_SCH_UNREG:
1495                 if (!cdev || !cdev->private->flags.resuming)
1496                         css_sch_device_unregister(sch);
1497                 break;
1498         case IO_SCH_ORPH_ATTACH:
1499         case IO_SCH_UNREG_ATTACH:
1500         case IO_SCH_ATTACH:
1501                 dev_id.ssid = sch->schid.ssid;
1502                 dev_id.devno = sch->schib.pmcw.dev;
1503                 cdev = get_ccwdev_by_dev_id(&dev_id);
1504                 if (!cdev) {
1505                         sch_create_and_recog_new_device(sch);
1506                         break;
1507                 }
1508                 rc = ccw_device_move_to_sch(cdev, sch);
1509                 if (rc) {
1510                         /* Release reference from get_ccwdev_by_dev_id() */
1511                         put_device(&cdev->dev);
1512                         goto out;
1513                 }
1514                 spin_lock_irqsave(sch->lock, flags);
1515                 ccw_device_trigger_reprobe(cdev);
1516                 spin_unlock_irqrestore(sch->lock, flags);
1517                 /* Release reference from get_ccwdev_by_dev_id() */
1518                 put_device(&cdev->dev);
1519                 break;
1520         default:
1521                 break;
1522         }
1523         return 0;
1524
1525 out_unlock:
1526         spin_unlock_irqrestore(sch->lock, flags);
1527 out:
1528         return rc;
1529 }
1530
1531 #ifdef CONFIG_CCW_CONSOLE
1532 static struct ccw_device console_cdev;
1533 static struct ccw_device_private console_private;
1534 static int console_cdev_in_use;
1535
1536 static DEFINE_SPINLOCK(ccw_console_lock);
1537
1538 spinlock_t *cio_get_console_lock(void)
1539 {
1540         return &ccw_console_lock;
1541 }
1542
1543 static int ccw_device_console_enable(struct ccw_device *cdev,
1544                                      struct subchannel *sch)
1545 {
1546         int rc;
1547
1548         /* Attach subchannel private data. */
1549         sch->private = cio_get_console_priv();
1550         memset(sch->private, 0, sizeof(struct io_subchannel_private));
1551         io_subchannel_init_fields(sch);
1552         rc = cio_commit_config(sch);
1553         if (rc)
1554                 return rc;
1555         sch->driver = &io_subchannel_driver;
1556         /* Initialize the ccw_device structure. */
1557         cdev->dev.parent = &sch->dev;
1558         sch_set_cdev(sch, cdev);
1559         io_subchannel_recog(cdev, sch);
1560         /* Now wait for the async. recognition to come to an end. */
1561         spin_lock_irq(cdev->ccwlock);
1562         while (!dev_fsm_final_state(cdev))
1563                 wait_cons_dev();
1564         rc = -EIO;
1565         if (cdev->private->state != DEV_STATE_OFFLINE)
1566                 goto out_unlock;
1567         ccw_device_online(cdev);
1568         while (!dev_fsm_final_state(cdev))
1569                 wait_cons_dev();
1570         if (cdev->private->state != DEV_STATE_ONLINE)
1571                 goto out_unlock;
1572         rc = 0;
1573 out_unlock:
1574         spin_unlock_irq(cdev->ccwlock);
1575         return rc;
1576 }
1577
1578 struct ccw_device *
1579 ccw_device_probe_console(void)
1580 {
1581         struct subchannel *sch;
1582         int ret;
1583
1584         if (xchg(&console_cdev_in_use, 1) != 0)
1585                 return ERR_PTR(-EBUSY);
1586         sch = cio_probe_console();
1587         if (IS_ERR(sch)) {
1588                 console_cdev_in_use = 0;
1589                 return (void *) sch;
1590         }
1591         memset(&console_cdev, 0, sizeof(struct ccw_device));
1592         memset(&console_private, 0, sizeof(struct ccw_device_private));
1593         console_cdev.private = &console_private;
1594         console_private.cdev = &console_cdev;
1595         ret = ccw_device_console_enable(&console_cdev, sch);
1596         if (ret) {
1597                 cio_release_console();
1598                 console_cdev_in_use = 0;
1599                 return ERR_PTR(ret);
1600         }
1601         console_cdev.online = 1;
1602         return &console_cdev;
1603 }
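/*
 * Usage sketch (illustration only, hence inside #if 0): how an early console
 * driver might obtain the statically allocated console ccw_device during
 * setup, before regular device registration is available.  The identifiers
 * prefixed with "my_" are hypothetical.
 */
#if 0
static struct ccw_device *my_console_cdev;

static int __init my_console_init(void)
{
	struct ccw_device *cdev;

	cdev = ccw_device_probe_console();
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);	/* e.g. -EBUSY if already claimed */
	my_console_cdev = cdev;		/* device is recognized and online */
	return 0;
}
#endif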
1604
1605 static int ccw_device_pm_restore(struct device *dev);
1606
1607 int ccw_device_force_console(void)
1608 {
1609         if (!console_cdev_in_use)
1610                 return -ENODEV;
1611         return ccw_device_pm_restore(&console_cdev.dev);
1612 }
1613 EXPORT_SYMBOL_GPL(ccw_device_force_console);
1614 #endif
1615
1616 /*
1617  * get ccw_device matching the busid, but only if owned by cdrv
1618  */
1619 static int
1620 __ccwdev_check_busid(struct device *dev, void *id)
1621 {
1622         char *bus_id;
1623
1624         bus_id = id;
1625
1626         return (strcmp(bus_id, dev_name(dev)) == 0);
1627 }
1628
1629
1630 /**
1631  * get_ccwdev_by_busid() - obtain device from a bus id
1632  * @cdrv: driver the device is owned by
1633  * @bus_id: bus id of the device to be searched
1634  *
1635  * This function searches all devices owned by @cdrv for a device with a bus
1636  * id matching @bus_id.
1637  * Returns:
1638  *  If a match is found, the reference count of the found device is
1639  *  increased and the device is returned; else %NULL is returned.
1640  */
1641 struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1642                                        const char *bus_id)
1643 {
1644         struct device *dev;
1645         struct device_driver *drv;
1646
1647         drv = get_driver(&cdrv->driver);
1648         if (!drv)
1649                 return NULL;
1650
1651         dev = driver_find_device(drv, NULL, (void *)bus_id,
1652                                  __ccwdev_check_busid);
1653         put_driver(drv);
1654
1655         return dev ? to_ccwdev(dev) : NULL;
1656 }
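/*
 * Usage sketch (illustration only, hence inside #if 0): looking up one of a
 * driver's devices by bus id.  "my_driver" and the bus id string are
 * hypothetical; the reference taken by get_ccwdev_by_busid() must be dropped
 * with put_device() when the caller is done with the device.
 */
#if 0
static void my_lookup_example(void)
{
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_busid(&my_driver, "0.0.4711");
	if (!cdev)
		return;
	/* ... use cdev ... */
	put_device(&cdev->dev);	/* drop the reference taken by the lookup */
}
#endif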
1657
1658 /************************** device driver handling ************************/
1659
1660 /* This is the implementation of the ccw_driver class. The probe, remove
1661  * and release methods are very similar to the device_driver
1662  * implementations, with the difference that they take ccw_device
1663  * arguments.
1664  *
1665  * A ccw driver also contains the information that is needed for
1666  * device matching.
1667  */
1668 static int
1669 ccw_device_probe (struct device *dev)
1670 {
1671         struct ccw_device *cdev = to_ccwdev(dev);
1672         struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1673         int ret;
1674
1675         cdev->drv = cdrv; /* to let the driver call _set_online */
1676
1677         ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1678
1679         if (ret) {
1680                 cdev->drv = NULL;
1681                 return ret;
1682         }
1683
1684         return 0;
1685 }
1686
1687 static int
1688 ccw_device_remove (struct device *dev)
1689 {
1690         struct ccw_device *cdev = to_ccwdev(dev);
1691         struct ccw_driver *cdrv = cdev->drv;
1692         int ret;
1693
1694         if (cdrv->remove)
1695                 cdrv->remove(cdev);
1696         if (cdev->online) {
1697                 cdev->online = 0;
1698                 spin_lock_irq(cdev->ccwlock);
1699                 ret = ccw_device_offline(cdev);
1700                 spin_unlock_irq(cdev->ccwlock);
1701                 if (ret == 0)
1702                         wait_event(cdev->private->wait_q,
1703                                    dev_fsm_final_state(cdev));
1704                 else
1705                         CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1706                                       "device 0.%x.%04x\n",
1707                                       ret, cdev->private->dev_id.ssid,
1708                                       cdev->private->dev_id.devno);
1709                 /* Give up reference obtained in ccw_device_set_online(). */
1710                 put_device(&cdev->dev);
1711         }
1712         ccw_device_set_timeout(cdev, 0);
1713         cdev->drv = NULL;
1714         return 0;
1715 }
1716
1717 static void ccw_device_shutdown(struct device *dev)
1718 {
1719         struct ccw_device *cdev;
1720
1721         cdev = to_ccwdev(dev);
1722         if (cdev->drv && cdev->drv->shutdown)
1723                 cdev->drv->shutdown(cdev);
1724         disable_cmf(cdev);
1725 }
1726
1727 static int ccw_device_pm_prepare(struct device *dev)
1728 {
1729         struct ccw_device *cdev = to_ccwdev(dev);
1730
1731         if (work_pending(&cdev->private->todo_work))
1732                 return -EAGAIN;
1733         /* Fail while device is being set online/offline. */
1734         if (atomic_read(&cdev->private->onoff))
1735                 return -EAGAIN;
1736
1737         if (cdev->online && cdev->drv && cdev->drv->prepare)
1738                 return cdev->drv->prepare(cdev);
1739
1740         return 0;
1741 }
1742
1743 static void ccw_device_pm_complete(struct device *dev)
1744 {
1745         struct ccw_device *cdev = to_ccwdev(dev);
1746
1747         if (cdev->online && cdev->drv && cdev->drv->complete)
1748                 cdev->drv->complete(cdev);
1749 }
1750
1751 static int ccw_device_pm_freeze(struct device *dev)
1752 {
1753         struct ccw_device *cdev = to_ccwdev(dev);
1754         struct subchannel *sch = to_subchannel(cdev->dev.parent);
1755         int ret, cm_enabled;
1756
1757         /* Fail suspend while device is in a transitional state. */
1758         if (!dev_fsm_final_state(cdev))
1759                 return -EAGAIN;
1760         if (!cdev->online)
1761                 return 0;
1762         if (cdev->drv && cdev->drv->freeze) {
1763                 ret = cdev->drv->freeze(cdev);
1764                 if (ret)
1765                         return ret;
1766         }
1767
1768         spin_lock_irq(sch->lock);
1769         cm_enabled = cdev->private->cmb != NULL;
1770         spin_unlock_irq(sch->lock);
1771         if (cm_enabled) {
1772                 /* Stop the css from writing measurement data to memory. */
1773                 ret = ccw_set_cmf(cdev, 0);
1774                 if (ret)
1775                         return ret;
1776         }
1777         /* From here on, disallow device driver I/O. */
1778         spin_lock_irq(sch->lock);
1779         ret = cio_disable_subchannel(sch);
1780         spin_unlock_irq(sch->lock);
1781
1782         return ret;
1783 }
1784
1785 static int ccw_device_pm_thaw(struct device *dev)
1786 {
1787         struct ccw_device *cdev = to_ccwdev(dev);
1788         struct subchannel *sch = to_subchannel(cdev->dev.parent);
1789         int ret, cm_enabled;
1790
1791         if (!cdev->online)
1792                 return 0;
1793
1794         spin_lock_irq(sch->lock);
1795         /* Allow device driver I/O again. */
1796         ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
1797         cm_enabled = cdev->private->cmb != NULL;
1798         spin_unlock_irq(sch->lock);
1799         if (ret)
1800                 return ret;
1801
1802         if (cm_enabled) {
1803                 ret = ccw_set_cmf(cdev, 1);
1804                 if (ret)
1805                         return ret;
1806         }
1807
1808         if (cdev->drv && cdev->drv->thaw)
1809                 ret = cdev->drv->thaw(cdev);
1810
1811         return ret;
1812 }
1813
1814 static void __ccw_device_pm_restore(struct ccw_device *cdev)
1815 {
1816         struct subchannel *sch = to_subchannel(cdev->dev.parent);
1817
1818         spin_lock_irq(sch->lock);
1819         if (cio_is_console(sch->schid)) {
1820                 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1821                 goto out_unlock;
1822         }
1823         /*
1824          * While we were sleeping, devices may have gone or become
1825          * available again. Kick re-detection.
1826          */
1827         cdev->private->flags.resuming = 1;
1828         css_schedule_eval(sch->schid);
1829         spin_unlock_irq(sch->lock);
1830         css_complete_work();
1831
1832         /* cdev may have been moved to a different subchannel. */
1833         sch = to_subchannel(cdev->dev.parent);
1834         spin_lock_irq(sch->lock);
1835         if (cdev->private->state != DEV_STATE_ONLINE &&
1836             cdev->private->state != DEV_STATE_OFFLINE)
1837                 goto out_unlock;
1838
1839         ccw_device_recognition(cdev);
1840         spin_unlock_irq(sch->lock);
1841         wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
1842                    cdev->private->state == DEV_STATE_DISCONNECTED);
1843         spin_lock_irq(sch->lock);
1844
1845 out_unlock:
1846         cdev->private->flags.resuming = 0;
1847         spin_unlock_irq(sch->lock);
1848 }
1849
1850 static int resume_handle_boxed(struct ccw_device *cdev)
1851 {
1852         cdev->private->state = DEV_STATE_BOXED;
1853         if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
1854                 return 0;
1855         ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1856         return -ENODEV;
1857 }
1858
1859 static int resume_handle_disc(struct ccw_device *cdev)
1860 {
1861         cdev->private->state = DEV_STATE_DISCONNECTED;
1862         if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
1863                 return 0;
1864         ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1865         return -ENODEV;
1866 }
1867
1868 static int ccw_device_pm_restore(struct device *dev)
1869 {
1870         struct ccw_device *cdev = to_ccwdev(dev);
1871         struct subchannel *sch;
1872         int ret = 0;
1873
1874         __ccw_device_pm_restore(cdev);
1875         sch = to_subchannel(cdev->dev.parent);
1876         spin_lock_irq(sch->lock);
1877         if (cio_is_console(sch->schid))
1878                 goto out_restore;
1879
1880         /* check recognition results */
1881         switch (cdev->private->state) {
1882         case DEV_STATE_OFFLINE:
1883         case DEV_STATE_ONLINE:
1884                 cdev->private->flags.donotify = 0;
1885                 break;
1886         case DEV_STATE_BOXED:
1887                 ret = resume_handle_boxed(cdev);
1888                 if (ret)
1889                         goto out_unlock;
1890                 goto out_restore;
1891         default:
1892                 ret = resume_handle_disc(cdev);
1893                 if (ret)
1894                         goto out_unlock;
1895                 goto out_restore;
1896         }
1897         /* check if the device type has changed */
1898         if (!ccw_device_test_sense_data(cdev)) {
1899                 ccw_device_update_sense_data(cdev);
1900                 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
1901                 ret = -ENODEV;
1902                 goto out_unlock;
1903         }
1904         if (!cdev->online)
1905                 goto out_unlock;
1906
1907         if (ccw_device_online(cdev)) {
1908                 ret = resume_handle_disc(cdev);
1909                 if (ret)
1910                         goto out_unlock;
1911                 goto out_restore;
1912         }
1913         spin_unlock_irq(sch->lock);
1914         wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1915         spin_lock_irq(sch->lock);
1916
1917         if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1918                 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1919                 ret = -ENODEV;
1920                 goto out_unlock;
1921         }
1922
1923         /* reenable cmf, if needed */
1924         if (cdev->private->cmb) {
1925                 spin_unlock_irq(sch->lock);
1926                 ret = ccw_set_cmf(cdev, 1);
1927                 spin_lock_irq(sch->lock);
1928                 if (ret) {
1929                         CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
1930                                       "(rc=%d)\n", cdev->private->dev_id.ssid,
1931                                       cdev->private->dev_id.devno, ret);
1932                         ret = 0;
1933                 }
1934         }
1935
1936 out_restore:
1937         spin_unlock_irq(sch->lock);
1938         if (cdev->online && cdev->drv && cdev->drv->restore)
1939                 ret = cdev->drv->restore(cdev);
1940         return ret;
1941
1942 out_unlock:
1943         spin_unlock_irq(sch->lock);
1944         return ret;
1945 }
1946
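/*
 * Power management callbacks for the hibernation path: .prepare/.complete
 * bracket the transition, .freeze quiesces the device before the hibernation
 * image is written, .thaw re-enables it afterwards, and .restore re-activates
 * the device when booting from the image (kicking re-detection and device
 * recognition, see __ccw_device_pm_restore() above).
 */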
1947 static const struct dev_pm_ops ccw_pm_ops = {
1948         .prepare = ccw_device_pm_prepare,
1949         .complete = ccw_device_pm_complete,
1950         .freeze = ccw_device_pm_freeze,
1951         .thaw = ccw_device_pm_thaw,
1952         .restore = ccw_device_pm_restore,
1953 };
1954
1955 struct bus_type ccw_bus_type = {
1956         .name   = "ccw",
1957         .match  = ccw_bus_match,
1958         .uevent = ccw_uevent,
1959         .probe  = ccw_device_probe,
1960         .remove = ccw_device_remove,
1961         .shutdown = ccw_device_shutdown,
1962         .pm = &ccw_pm_ops,
1963 };
1964
1965 /**
1966  * ccw_driver_register() - register a ccw driver
1967  * @cdriver: driver to be registered
1968  *
1969  * This function is mainly a wrapper around driver_register().
1970  * Returns:
1971  *   %0 on success and a negative error value on failure.
1972  */
1973 int ccw_driver_register(struct ccw_driver *cdriver)
1974 {
1975         struct device_driver *drv = &cdriver->driver;
1976
1977         drv->bus = &ccw_bus_type;
1978         drv->name = cdriver->name;
1979         drv->owner = cdriver->owner;
1980
1981         return driver_register(drv);
1982 }
1983
1984 /**
1985  * ccw_driver_unregister() - deregister a ccw driver
1986  * @cdriver: driver to be deregistered
1987  *
1988  * This function is mainly a wrapper around driver_unregister().
1989  */
1990 void ccw_driver_unregister(struct ccw_driver *cdriver)
1991 {
1992         driver_unregister(&cdriver->driver);
1993 }
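/*
 * Usage sketch (illustration only, hence inside #if 0): a minimal ccw driver
 * registered the way a typical device driver would do it.  All identifiers
 * prefixed with "my_" are hypothetical and the cu_type/dev_type values are
 * placeholders; real drivers usually also provide set_online/set_offline
 * callbacks.
 */
#if 0
static struct ccw_device_id my_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, my_ids);

static int my_probe(struct ccw_device *cdev)
{
	/* Allocate and attach driver-private data here. */
	dev_set_drvdata(&cdev->dev, NULL);
	return 0;
}

static void my_remove(struct ccw_device *cdev)
{
	/* Undo whatever my_probe() set up. */
}

static struct ccw_driver my_driver = {
	.owner	= THIS_MODULE,
	.name	= "my_ccw_driver",
	.ids	= my_ids,
	.probe	= my_probe,
	.remove	= my_remove,
};

static int __init my_init(void)
{
	return ccw_driver_register(&my_driver);
}

static void __exit my_exit(void)
{
	ccw_driver_unregister(&my_driver);
}
module_init(my_init);
module_exit(my_exit);
#endif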
1994
1995 /* Helper func for qdio. */
1996 struct subchannel_id
1997 ccw_device_get_subchannel_id(struct ccw_device *cdev)
1998 {
1999         struct subchannel *sch;
2000
2001         sch = to_subchannel(cdev->dev.parent);
2002         return sch->schid;
2003 }
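/*
 * Usage sketch (illustration only, hence inside #if 0): callers such as qdio
 * typically need the subchannel id for chsc requests or logging.  The
 * function name below is hypothetical.
 */
#if 0
static void my_log_schid(struct ccw_device *cdev)
{
	struct subchannel_id schid = ccw_device_get_subchannel_id(cdev);

	pr_info("device %s sits on subchannel 0.%x.%04x\n",
		dev_name(&cdev->dev), schid.ssid, schid.sch_no);
}
#endif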
2004
2005 static void ccw_device_todo(struct work_struct *work)
2006 {
2007         struct ccw_device_private *priv;
2008         struct ccw_device *cdev;
2009         struct subchannel *sch;
2010         enum cdev_todo todo;
2011
2012         priv = container_of(work, struct ccw_device_private, todo_work);
2013         cdev = priv->cdev;
2014         sch = to_subchannel(cdev->dev.parent);
2015         /* Find out todo. */
2016         spin_lock_irq(cdev->ccwlock);
2017         todo = priv->todo;
2018         priv->todo = CDEV_TODO_NOTHING;
2019         CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
2020                       priv->dev_id.ssid, priv->dev_id.devno, todo);
2021         spin_unlock_irq(cdev->ccwlock);
2022         /* Perform todo. */
2023         switch (todo) {
2024         case CDEV_TODO_ENABLE_CMF:
2025                 cmf_reenable(cdev);
2026                 break;
2027         case CDEV_TODO_REBIND:
2028                 ccw_device_do_unbind_bind(cdev);
2029                 break;
2030         case CDEV_TODO_REGISTER:
2031                 io_subchannel_register(cdev);
2032                 break;
2033         case CDEV_TODO_UNREG_EVAL:
2034                 if (!sch_is_pseudo_sch(sch))
2035                         css_schedule_eval(sch->schid);
2036                 /* fall-through */
2037         case CDEV_TODO_UNREG:
2038                 if (sch_is_pseudo_sch(sch))
2039                         ccw_device_unregister(cdev);
2040                 else
2041                         ccw_device_call_sch_unregister(cdev);
2042                 break;
2043         default:
2044                 break;
2045         }
2046         /* Release workqueue ref. */
2047         put_device(&cdev->dev);
2048 }
2049
2050 /**
2051  * ccw_device_sched_todo - schedule ccw device operation
2052  * @cdev: ccw device
2053  * @todo: todo
2054  *
2055  * Schedule the operation identified by @todo to be performed on the slow path
2056  * workqueue. Do nothing if another operation with higher priority is already
2057  * scheduled. Needs to be called with ccwdev lock held.
2058  */
2059 void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2060 {
2061         CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2062                       cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2063                       todo);
2064         if (cdev->private->todo >= todo)
2065                 return;
2066         cdev->private->todo = todo;
2067         /* Get workqueue ref. */
2068         if (!get_device(&cdev->dev))
2069                 return;
2070         if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2071                 /* Already queued, release workqueue ref. */
2072                 put_device(&cdev->dev);
2073         }
2074 }
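/*
 * Usage sketch (illustration only, hence inside #if 0): scheduling a todo
 * while holding the ccwdev lock, as required by the comment above.  The
 * calling function is hypothetical.
 */
#if 0
static void my_handle_gone_device(struct ccw_device *cdev)
{
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irqrestore(cdev->ccwlock, flags);
}
#endif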
2075
2076 /**
2077  * ccw_device_siosl() - initiate logging
2078  * @cdev: ccw device
2079  *
2080  * This function is used to invoke model-dependent logging within the channel
2081  * subsystem.
2082  */
2083 int ccw_device_siosl(struct ccw_device *cdev)
2084 {
2085         struct subchannel *sch = to_subchannel(cdev->dev.parent);
2086
2087         return chsc_siosl(sch->schid);
2088 }
2089 EXPORT_SYMBOL_GPL(ccw_device_siosl);
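/*
 * Usage sketch (illustration only, hence inside #if 0): a device driver could
 * trigger model-dependent logging from an error-recovery path.  The function
 * name below is hypothetical.
 */
#if 0
static void my_report_error(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_siosl(cdev);
	if (rc)
		dev_warn(&cdev->dev, "logging request failed (rc=%d)\n", rc);
}
#endif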
2090
2091 MODULE_LICENSE("GPL");
2092 EXPORT_SYMBOL(ccw_device_set_online);
2093 EXPORT_SYMBOL(ccw_device_set_offline);
2094 EXPORT_SYMBOL(ccw_driver_register);
2095 EXPORT_SYMBOL(ccw_driver_unregister);
2096 EXPORT_SYMBOL(get_ccwdev_by_busid);
2097 EXPORT_SYMBOL(ccw_bus_type);
2098 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);