// SPDX-License-Identifier: GPL-1.0+
/*
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"

/**
 * ccw_device_set_options_mask() - set some options and unset the rest
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, all flags not specified in @flags
 * are cleared.
 * Returns:
 *  %0 on success, -%EINVAL on an invalid flag combination.
 */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *  %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_clear_options() - clear some options
 * @cdev: device for which the options are to be cleared
 * @flags: options to be cleared
 *
 * All flags specified in @flags are cleared, the remainder is left untouched.
 */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
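
/*
 * Usage sketch (illustrative only; the driver callback names are
 * hypothetical): a driver typically selects its options once, before the
 * device is set online, and may clear individual options again later.
 *
 *	static int mydrv_probe(struct ccw_device *cdev)
 *	{
 *		int ret;
 *
 *		ret = ccw_device_set_options_mask(cdev, CCWDEV_DO_PATHGROUP |
 *							CCWDEV_DO_MULTIPATH);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static void mydrv_single_path_only(struct ccw_device *cdev)
 *	{
 *		ccw_device_clear_options(cdev, CCWDEV_DO_MULTIPATH);
 *	}
 */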

/**
 * ccw_device_is_pathgroup() - determine if paths to this device are grouped
 * @cdev: ccw device
 *
 * Return non-zero if there is a path group, zero otherwise.
 */
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
	return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);

/**
 * ccw_device_is_multipath() - determine if device is operating in multipath mode
 * @cdev: ccw device
 *
 * Return non-zero if device is operating in multipath mode, zero otherwise.
 */
int ccw_device_is_multipath(struct ccw_device *cdev)
{
	return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);

/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

/**
 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
		break;
	case -EACCES:
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}
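
/*
 * Usage sketch (illustrative only; the helper name and the simplified
 * command setup are hypothetical): a driver starts a single-CCW channel
 * program while holding the ccw device lock with interrupts disabled.
 * Completion, or the ERR_PTR(-ETIMEDOUT) irb on timeout, is delivered to
 * the driver's interrupt handler.
 *
 *	static int mydrv_start_nop(struct ccw_device *cdev, struct ccw1 *ccw)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		ccw->cmd_code = CCW_CMD_NOOP;
 *		ccw->flags = CCW_FLAG_SLI;
 *		ccw->count = 0;
 *		ccw->cda = 0;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		ret = ccw_device_start_timeout_key(cdev, ccw,
 *						   (unsigned long)ccw, 0,
 *						   PAGE_DEFAULT_KEY, 0,
 *						   10 * HZ);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		return ret;
 *	}
 */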

/**
 * ccw_device_start_key() - start a s390 channel program with key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, __u8 key,
			 unsigned long flags)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
					    flags, 0);
}

/**
 * ccw_device_start() - start a s390 channel program
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

/**
 * ccw_device_start_timeout() - start a s390 channel program with timeout
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm,
			     unsigned long flags, int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
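
/*
 * Usage sketch (illustrative only; the helper name and the halt-then-clear
 * escalation are a hypothetical driver policy, not mandated by this API):
 * to cancel an outstanding request, a driver may try the less intrusive
 * halt first and fall back to clear, again under the ccw device lock.
 *
 *	static int mydrv_cancel_io(struct ccw_device *cdev)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		ret = ccw_device_halt(cdev, 0);
 *		if (ret == -EBUSY)
 *			ret = ccw_device_clear(cdev, 0);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		return ret;
 *	}
 */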

/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}

/**
 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
 * @cdev: ccw device to inspect
 * @ct: command type to look for
 *
 * During SenseID, command information words (CIWs) describing special
 * commands available to the device may have been stored in the extended
 * sense data. This function searches for CIWs of a specified command
 * type in the extended sense data.
 * Returns:
 *  %NULL if no extended sense data has been stored or if no CIW of the
 *  specified command type could be found,
 *  else a pointer to the CIW of the specified command type.
 */
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}
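
/*
 * Usage sketch (illustrative only): a driver can look up the CIW for a
 * special command, e.g. read configuration data (CIW_TYPE_RCD), and use
 * the command code and count it reports when building a channel program.
 *
 *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *
 *	if (ciw) {
 *		ccw->cmd_code = ciw->cmd;
 *		ccw->count = ciw->count;
 *	}
 */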

/**
 * ccw_device_get_path_mask() - get currently available paths
 * @cdev: ccw device to be queried
 * Returns:
 *  %0 if no subchannel for the device is available,
 *  else the mask of currently available paths for the ccw device's subchannel.
 */
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev->dev.parent)
		return 0;

	sch = to_subchannel(cdev->dev.parent);
	return sch->lpm;
}

/**
 * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
 * @cdev: device to obtain the descriptor for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel path. Return %NULL on error.
 */
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
						       int chp_idx)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	return chp_get_chp_desc(chpid);
}

/**
 * ccw_device_get_util_str() - return newly allocated utility strings
 * @cdev: device to obtain the utility strings for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the utility strings
 * associated with the given channel path. Return %NULL on error.
 */
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *util_str;

	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	chp = chpid_to_chp(chpid);

	util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
	if (!util_str)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return util_str;
}
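
/*
 * Usage sketch (illustrative only): both helpers return kmalloc()ed copies
 * that the caller owns and must release with kfree() when done, e.g. when
 * inspecting the descriptor of the first channel path:
 *
 *	struct channel_path_desc_fmt0 *desc;
 *
 *	desc = ccw_device_get_chp_desc(cdev, 0);
 *	if (desc) {
 *		// ... inspect the descriptor ...
 *		kfree(desc);
 *	}
 */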

/**
 * ccw_device_get_id() - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
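
/*
 * Usage sketch (illustrative only): the returned id identifies the device
 * by subchannel set id and device number, e.g. for logging purposes.
 *
 *	struct ccw_dev_id dev_id;
 *
 *	ccw_device_get_id(cdev, &dev_id);
 *	dev_info(&cdev->dev, "ssid %x devno %04x\n",
 *		 dev_id.ssid, dev_id.devno);
 */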

/**
 * ccw_device_tm_start_timeout_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0) {
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
	}
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

/**
 * ccw_device_tm_start_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
			    unsigned long intparm, u8 lpm, u8 key)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
}
EXPORT_SYMBOL(ccw_device_tm_start_key);

/**
 * ccw_device_tm_start() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
			unsigned long intparm, u8 lpm)
{
	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
				       PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);

/**
 * ccw_device_tm_start_timeout() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
				unsigned long intparm, u8 lpm, int expires)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
					       PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
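
/*
 * Usage sketch (illustrative only; the helper name is hypothetical and the
 * tcw is assumed to have been built beforehand, e.g. with the itcw helpers
 * from <asm/itcw.h>): like the command-mode start functions, a transport-
 * mode start is issued under the ccw device lock.
 *
 *	static int mydrv_start_tcw(struct ccw_device *cdev, struct tcw *tcw)
 *	{
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_tm_start_timeout(cdev, tcw,
 *						 (unsigned long)tcw, 0,
 *						 10 * HZ);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		return rc;
 *	}
 */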

/**
 * ccw_device_get_mdc() - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-bytes blocks all paths at least support
 * for a transport command. Return value 0 indicates failure.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	int mdc = 0, i;

	/* Adjust requested path mask to exclude varied-off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;

		mutex_lock(&chp->lock);
		if (!chp->desc_fmt1.f) {
			mutex_unlock(&chp->lock);
			return 0;
		}
		if (!chp->desc_fmt1.r)
			mdc = 1;
		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
			    chp->desc_fmt1.mdc;
		mutex_unlock(&chp->lock);
	}

	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
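
/*
 * Usage sketch (illustrative only): the returned count is in units of 64K
 * bytes, so a driver sizing its transport-mode transfers over all currently
 * available paths could do:
 *
 *	int mdc = ccw_device_get_mdc(cdev, 0);
 *	size_t max_bytes = mdc > 0 ? (size_t)mdc * 64 * 1024 : 0;
 */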

/**
 * ccw_device_tm_intrg() - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);

/**
 * ccw_device_get_schid() - obtain a subchannel id
 * @cdev: device to obtain the id for
 * @schid: where to fill in the values
 */
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);

EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(ccw_device_get_util_str);