drivers/scsi/cxlflash/superpipe.c
1 /*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/delay.h>
16 #include <linux/file.h>
17 #include <linux/syscalls.h>
18 #include <misc/cxl.h>
19 #include <asm/unaligned.h>
20
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_eh.h>
25 #include <uapi/scsi/cxlflash_ioctl.h>
26
27 #include "sislite.h"
28 #include "common.h"
29 #include "superpipe.h"
30
31 struct cxlflash_global global;
32
33 /**
34 * marshal_det_to_rele() - translate detach to release structure
35 * @detach: Source structure from which to translate/copy.
36 * @release: Destination structure for the translate/copy.
37 */
38 static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
39 struct dk_cxlflash_release *release)
40 {
41 release->hdr = detach->hdr;
42 release->context_id = detach->context_id;
43 }
44
45 /**
46 * cxlflash_free_errpage() - frees resources associated with global error page
47 */
48 void cxlflash_free_errpage(void)
49 {
50
51 mutex_lock(&global.mutex);
52 if (global.err_page) {
53 __free_page(global.err_page);
54 global.err_page = NULL;
55 }
56 mutex_unlock(&global.mutex);
57 }
58
59 /**
60 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
61 * @cfg: Internal structure associated with the host.
62 *
63 * When the host needs to go down, all users must be quiesced and their
64 * memory freed. This is accomplished by putting the contexts in error
65 * state which will notify the user and let them 'drive' the tear-down.
66 * Meanwhile, this routine camps until all user contexts have been removed.
67 */
68 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
69 {
70 struct device *dev = &cfg->dev->dev;
71 int i, found;
72
73 cxlflash_mark_contexts_error(cfg);
74
75 while (true) {
76 found = false;
77
78 for (i = 0; i < MAX_CONTEXT; i++)
79 if (cfg->ctx_tbl[i]) {
80 found = true;
81 break;
82 }
83
84 if (!found && list_empty(&cfg->ctx_err_recovery))
85 return;
86
87 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
88 __func__);
89 wake_up_all(&cfg->limbo_waitq);
90 ssleep(1);
91 }
92 }
93
94 /**
95 * find_error_context() - locates a context by cookie on the error recovery list
96 * @cfg: Internal structure associated with the host.
97 * @rctxid: Desired context by id.
98 * @file: Desired context by file.
99 *
100 * Return: Found context on success, NULL on failure
101 */
102 static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
103 struct file *file)
104 {
105 struct ctx_info *ctxi;
106
107 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
108 if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
109 return ctxi;
110
111 return NULL;
112 }
113
114 /**
115 * get_context() - obtains a validated and locked context reference
116 * @cfg: Internal structure associated with the host.
117 * @rctxid: Desired context (raw, un-decoded format).
118 * @arg: LUN information or file associated with request.
119 * @ctx_ctrl: Control information to 'steer' desired lookup.
120 *
121 * NOTE: despite the name pid, in Linux, current->pid actually refers
122 * to the lightweight process id (tid) and can change if the process is
123 * multi-threaded. The tgid remains constant for the process and only changes
124 * when the process forks. For all intents and purposes, think of tgid
125 * as a pid in the traditional sense.
126 *
127 * Return: Validated context on success, NULL on failure
128 */
129 struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
130 void *arg, enum ctx_ctrl ctx_ctrl)
131 {
132 struct device *dev = &cfg->dev->dev;
133 struct ctx_info *ctxi = NULL;
134 struct lun_access *lun_access = NULL;
135 struct file *file = NULL;
136 struct llun_info *lli = arg;
137 u64 ctxid = DECODE_CTXID(rctxid);
138 int rc;
139 pid_t pid = current->tgid, ctxpid = 0;
140
141 if (ctx_ctrl & CTX_CTRL_FILE) {
142 lli = NULL;
143 file = (struct file *)arg;
144 }
145
146 if (ctx_ctrl & CTX_CTRL_CLONE)
147 pid = current->parent->tgid;
148
149 if (likely(ctxid < MAX_CONTEXT)) {
150 while (true) {
151 rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
152 if (rc)
153 goto out;
154
155 ctxi = cfg->ctx_tbl[ctxid];
156 if (ctxi)
157 if ((file && (ctxi->file != file)) ||
158 (!file && (ctxi->ctxid != rctxid)))
159 ctxi = NULL;
160
161 if ((ctx_ctrl & CTX_CTRL_ERR) ||
162 (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
163 ctxi = find_error_context(cfg, rctxid, file);
164 if (!ctxi) {
165 mutex_unlock(&cfg->ctx_tbl_list_mutex);
166 goto out;
167 }
168
169 /*
170 * Need to acquire ownership of the context while still
171 * under the table/list lock to serialize with a remove
172 * thread. Use the 'try' to avoid stalling the
173 * table/list lock for a single context.
174 *
175 * Note that the lock order is:
176 *
177 * cfg->ctx_tbl_list_mutex -> ctxi->mutex
178 *
179 * Therefore release ctx_tbl_list_mutex before retrying.
180 */
181 rc = mutex_trylock(&ctxi->mutex);
182 mutex_unlock(&cfg->ctx_tbl_list_mutex);
183 if (rc)
184 break; /* got the context's lock! */
185 }
186
187 if (ctxi->unavail)
188 goto denied;
189
190 ctxpid = ctxi->pid;
191 if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
192 if (pid != ctxpid)
193 goto denied;
194
195 if (lli) {
196 list_for_each_entry(lun_access, &ctxi->luns, list)
197 if (lun_access->lli == lli)
198 goto out;
199 goto denied;
200 }
201 }
202
203 out:
204 dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
205 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
206 ctx_ctrl);
207
208 return ctxi;
209
210 denied:
211 mutex_unlock(&ctxi->mutex);
212 ctxi = NULL;
213 goto out;
214 }
215
216 /**
217 * put_context() - release a context that was retrieved from get_context()
218 * @ctxi: Context to release.
219 *
220 * For now, releasing the context equates to unlocking its mutex.
221 */
222 void put_context(struct ctx_info *ctxi)
223 {
224 mutex_unlock(&ctxi->mutex);
225 }
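/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * get/put pairing above, modeled on the ioctl handlers later in this file:
 */
#if 0
static int example_op(struct cxlflash_cfg *cfg, u64 rctxid,
		      struct llun_info *lli)
{
	struct ctx_info *ctxi;

	/* On success, returns with ctxi->mutex held */
	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi))
		return -EINVAL;

	/* ... operate on the validated, locked context ... */

	put_context(ctxi);	/* drops ctxi->mutex */
	return 0;
}
#endif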
226
227 /**
228 * afu_attach() - attach a context to the AFU
229 * @cfg: Internal structure associated with the host.
230 * @ctxi: Context to attach.
231 *
232 * Upon setting the context capabilities, they must be confirmed with
233 * a read back operation as the context might have been closed since
234 * the mailbox was unlocked. When this occurs, registration is failed.
235 *
236 * Return: 0 on success, -errno on failure
237 */
238 static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
239 {
240 struct device *dev = &cfg->dev->dev;
241 struct afu *afu = cfg->afu;
242 struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
243 int rc = 0;
244 u64 val;
245
246 /* Unlock cap and restrict user to read/write cmds in translated mode */
247 readq_be(&ctrl_map->mbox_r);
248 val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
249 writeq_be(val, &ctrl_map->ctx_cap);
250 val = readq_be(&ctrl_map->ctx_cap);
251 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
252 dev_err(dev, "%s: ctx may be closed val=%016llX\n",
253 __func__, val);
254 rc = -EAGAIN;
255 goto out;
256 }
257
258 /* Set up MMIO registers pointing to the RHT */
259 writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
260 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
261 writeq_be(val, &ctrl_map->rht_cnt_id);
262 out:
263 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
264 return rc;
265 }
266
267 /**
268 * read_cap16() - issues a SCSI READ_CAP16 command
269 * @sdev: SCSI device associated with LUN.
270 * @lli: LUN destined for capacity request.
271 *
272 * Return: 0 on success, -errno on failure
273 */
274 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
275 {
276 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
277 struct device *dev = &cfg->dev->dev;
278 struct glun_info *gli = lli->parent;
279 u8 *cmd_buf = NULL;
280 u8 *scsi_cmd = NULL;
281 u8 *sense_buf = NULL;
282 int rc = 0;
283 int result = 0;
284 int retry_cnt = 0;
285 u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
286
287 retry:
288 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
289 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
290 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
291 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
292 rc = -ENOMEM;
293 goto out;
294 }
295
296 scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
297 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
298 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
299
300 dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
301 retry_cnt ? "re" : "", scsi_cmd[0]);
302
303 result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
304 CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
305
306 if (driver_byte(result) == DRIVER_SENSE) {
307 result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
308 if (result & SAM_STAT_CHECK_CONDITION) {
309 struct scsi_sense_hdr sshdr;
310
311 scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
312 &sshdr);
313 switch (sshdr.sense_key) {
314 case NO_SENSE:
315 case RECOVERED_ERROR:
316 /* fall through */
317 case NOT_READY:
318 result &= ~SAM_STAT_CHECK_CONDITION;
319 break;
320 case UNIT_ATTENTION:
321 switch (sshdr.asc) {
322 case 0x29: /* Power on Reset or Device Reset */
323 /* fall through */
324 case 0x2A: /* Device capacity changed */
325 case 0x3F: /* Report LUNs changed */
326 /* Retry the command once more */
327 if (retry_cnt++ < 1) {
328 kfree(cmd_buf);
329 kfree(scsi_cmd);
330 kfree(sense_buf);
331 goto retry;
332 }
333 }
334 break;
335 default:
336 break;
337 }
338 }
339 }
340
341 if (result) {
342 dev_err(dev, "%s: command failed, result=0x%x\n",
343 __func__, result);
344 rc = -EIO;
345 goto out;
346 }
347
348 /*
349 * Read cap was successful, grab values from the buffer;
350 * note that we don't need to worry about unaligned access
351 * as the buffer is allocated on an aligned boundary.
352 */
353 mutex_lock(&gli->mutex);
354 gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
355 gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
356 mutex_unlock(&gli->mutex);
357
358 out:
359 kfree(cmd_buf);
360 kfree(scsi_cmd);
361 kfree(sense_buf);
362
363 dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
364 __func__, gli->max_lba, gli->blk_len, rc);
365 return rc;
366 }
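/*
 * For reference, the layout of the READ CAPACITY(16) parameter data
 * consumed above (per SBC, both fields big-endian):
 *
 *	Bytes 0-7  : returned (last) logical block address -> gli->max_lba
 *	Bytes 8-11 : logical block length in bytes         -> gli->blk_len
 */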
367
368 /**
369 * get_rhte() - obtains validated resource handle table entry reference
370 * @ctxi: Context owning the resource handle.
371 * @rhndl: Resource handle associated with entry.
372 * @lli: LUN associated with request.
373 *
374 * Return: Validated RHTE on success, NULL on failure
375 */
376 struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
377 struct llun_info *lli)
378 {
379 struct sisl_rht_entry *rhte = NULL;
380
381 if (unlikely(!ctxi->rht_start)) {
382 pr_debug("%s: Context does not have allocated RHT!\n",
383 __func__);
384 goto out;
385 }
386
387 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
388 pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
389 goto out;
390 }
391
392 if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
393 pr_debug("%s: Bad resource handle LUN! (%d)\n",
394 __func__, rhndl);
395 goto out;
396 }
397
398 rhte = &ctxi->rht_start[rhndl];
399 if (unlikely(rhte->nmask == 0)) {
400 pr_debug("%s: Unopened resource handle! (%d)\n",
401 __func__, rhndl);
402 rhte = NULL;
403 goto out;
404 }
405
406 out:
407 return rhte;
408 }
409
410 /**
411 * rhte_checkout() - obtains free/empty resource handle table entry
412 * @ctxi: Context owning the resource handle.
413 * @lli: LUN associated with request.
414 *
415 * Return: Free RHTE on success, NULL on failure
416 */
417 struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
418 struct llun_info *lli)
419 {
420 struct sisl_rht_entry *rhte = NULL;
421 int i;
422
423 /* Find a free RHT entry */
424 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
425 if (ctxi->rht_start[i].nmask == 0) {
426 rhte = &ctxi->rht_start[i];
427 ctxi->rht_out++;
428 break;
429 }
430
431 if (likely(rhte))
432 ctxi->rht_lun[i] = lli;
433
434 pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
435 return rhte;
436 }
437
438 /**
439 * rhte_checkin() - releases a resource handle table entry
440 * @ctxi: Context owning the resource handle.
441 * @rhte: RHTE to release.
442 */
443 void rhte_checkin(struct ctx_info *ctxi,
444 struct sisl_rht_entry *rhte)
445 {
446 u32 rsrc_handle = rhte - ctxi->rht_start;
447
448 rhte->nmask = 0;
449 rhte->fp = 0;
450 ctxi->rht_out--;
451 ctxi->rht_lun[rsrc_handle] = NULL;
452 }
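/*
 * Condensed sketch of an RHT entry's life cycle as implemented by the
 * open/release paths later in this file (lun_id and port are placeholders;
 * error handling elided):
 */
#if 0
	struct sisl_rht_entry *rhte;

	rhte = rhte_checkout(ctxi, lli);	/* claim a free entry */
	if (unlikely(!rhte))
		return -EMFILE;

	rht_format1(rhte, lun_id, ctxi->rht_perms, port);	/* open */

	/* ... user performs I/O through the AFU ... */

	rhte_checkin(ctxi, rhte);		/* close */
#endif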
453
454 /**
455 * rht_format1() - populates an RHTE for format 1
456 * @rhte: RHTE to populate.
457 * @lun_id: LUN ID of LUN associated with RHTE.
458 * @perm: Desired permissions for RHTE.
459 * @port_sel: Port selection mask.
460 */
461 static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
462 u32 port_sel)
463 {
464 /*
465 * Populate the Format 1 RHT entry for direct access (physical
466 * LUN) using the synchronization sequence defined in the
467 * SISLite specification.
468 */
469 struct sisl_rht_entry_f1 dummy = { 0 };
470 struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
471
472 memset(rhte_f1, 0, sizeof(*rhte_f1));
473 rhte_f1->fp = SISL_RHT_FP(1U, 0);
474 dma_wmb(); /* Make setting of format bit visible */
475
476 rhte_f1->lun_id = lun_id;
477 dma_wmb(); /* Make setting of LUN id visible */
478
479 /*
480 * Use a dummy RHT Format 1 entry to build the second dword
481 * of the entry that must be populated in a single write when
482 * enabled (valid bit set to TRUE).
483 */
484 dummy.valid = 0x80;
485 dummy.fp = SISL_RHT_FP(1U, perm);
486 dummy.port_sel = port_sel;
487 rhte_f1->dw = dummy.dw;
488
489 dma_wmb(); /* Make remaining RHT entry fields visible */
490 }
491
492 /**
493 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
494 * @gli: LUN to attach.
495 * @mode: Desired mode of the LUN.
496 * @locked: Mutex status on current thread.
497 *
498 * Return: 0 on success, -errno on failure
499 */
500 int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
501 {
502 int rc = 0;
503
504 if (!locked)
505 mutex_lock(&gli->mutex);
506
507 if (gli->mode == MODE_NONE)
508 gli->mode = mode;
509 else if (gli->mode != mode) {
510 pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
511 __func__, gli->mode, mode);
512 rc = -EINVAL;
513 goto out;
514 }
515
516 gli->users++;
517 WARN_ON(gli->users <= 0);
518 out:
519 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
520 __func__, rc, gli->mode, gli->users);
521 if (!locked)
522 mutex_unlock(&gli->mutex);
523 return rc;
524 }
525
526 /**
527 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
528 * @gli: LUN to detach.
529 */
530 void cxlflash_lun_detach(struct glun_info *gli)
531 {
532 mutex_lock(&gli->mutex);
533 WARN_ON(gli->mode == MODE_NONE);
534 if (--gli->users == 0)
535 gli->mode = MODE_NONE;
536 pr_debug("%s: gli->users=%u\n", __func__, gli->users);
537 WARN_ON(gli->users < 0);
538 mutex_unlock(&gli->mutex);
539 }
540
541 /**
542 * _cxlflash_disk_release() - releases the specified resource entry
543 * @sdev: SCSI device associated with LUN.
544 * @ctxi: Context owning resources.
545 * @release: Release ioctl data structure.
546 *
547 * Note that the AFU sync should _not_ be performed when the context is sitting
548 * on the error recovery list. A context on the error recovery list is not known
549 * to the AFU due to reset. When the context is recovered, it will be reattached
550 * and made known again to the AFU.
551 *
552 * Return: 0 on success, -errno on failure
553 */
554 int _cxlflash_disk_release(struct scsi_device *sdev,
555 struct ctx_info *ctxi,
556 struct dk_cxlflash_release *release)
557 {
558 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
559 struct device *dev = &cfg->dev->dev;
560 struct llun_info *lli = sdev->hostdata;
561 struct glun_info *gli = lli->parent;
562 struct afu *afu = cfg->afu;
563 bool put_ctx = false;
564
565 res_hndl_t rhndl = release->rsrc_handle;
566
567 int rc = 0;
568 u64 ctxid = DECODE_CTXID(release->context_id),
569 rctxid = release->context_id;
570
571 struct sisl_rht_entry *rhte;
572 struct sisl_rht_entry_f1 *rhte_f1;
573
574 dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
575 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
576
577 if (!ctxi) {
578 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
579 if (unlikely(!ctxi)) {
580 dev_dbg(dev, "%s: Bad context! (%llu)\n",
581 __func__, ctxid);
582 rc = -EINVAL;
583 goto out;
584 }
585
586 put_ctx = true;
587 }
588
589 rhte = get_rhte(ctxi, rhndl, lli);
590 if (unlikely(!rhte)) {
591 dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
592 __func__, rhndl);
593 rc = -EINVAL;
594 goto out;
595 }
596
597 switch (gli->mode) {
598 case MODE_PHYSICAL:
599 /*
600 * Clear the Format 1 RHT entry for direct access
601 * (physical LUN) using the synchronization sequence
602 * defined in the SISLite specification.
603 */
604 rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
605
606 rhte_f1->valid = 0;
607 dma_wmb(); /* Make revocation of RHT entry visible */
608
609 rhte_f1->lun_id = 0;
610 dma_wmb(); /* Make clearing of LUN id visible */
611
612 rhte_f1->dw = 0;
613 dma_wmb(); /* Make RHT entry bottom-half clearing visible */
614
615 if (!ctxi->err_recovery_active)
616 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
617 break;
618 default:
619 WARN(1, "Unsupported LUN mode!");
620 goto out;
621 }
622
623 rhte_checkin(ctxi, rhte);
624 cxlflash_lun_detach(gli);
625
626 out:
627 if (put_ctx)
628 put_context(ctxi);
629 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
630 return rc;
631 }
632
633 int cxlflash_disk_release(struct scsi_device *sdev,
634 struct dk_cxlflash_release *release)
635 {
636 return _cxlflash_disk_release(sdev, NULL, release);
637 }
638
639 /**
640 * destroy_context() - releases a context
641 * @cfg: Internal structure associated with the host.
642 * @ctxi: Context to release.
643 *
644 * Note that the rht_lun array of the context is allocated separately
645 * when the context is created and is freed here together with the RHT
646 * page and the context itself. Also note that we conditionally check for the
647 * existence of the context control map before clearing the RHT registers
648 * and context capabilities because it is possible to destroy a context
649 * while the context is in the error state (previous mapping was removed
650 * [so we don't have to worry about clearing] and context is waiting for
651 * a new mapping).
652 */
653 static void destroy_context(struct cxlflash_cfg *cfg,
654 struct ctx_info *ctxi)
655 {
656 struct afu *afu = cfg->afu;
657
658 WARN_ON(!list_empty(&ctxi->luns));
659
660 /* Clear RHT registers and drop all capabilities for this context */
661 if (afu->afu_map && ctxi->ctrl_map) {
662 writeq_be(0, &ctxi->ctrl_map->rht_start);
663 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
664 writeq_be(0, &ctxi->ctrl_map->ctx_cap);
665 }
666
667 /* Free memory associated with context */
668 free_page((ulong)ctxi->rht_start);
669 kfree(ctxi->rht_lun);
670 kfree(ctxi);
671 atomic_dec_if_positive(&cfg->num_user_contexts);
672 }
673
674 /**
675 * create_context() - allocates and initializes a context
676 * @cfg: Internal structure associated with the host.
677 * @ctx: Previously obtained CXL context reference.
678 * @ctxid: Previously obtained process element associated with CXL context.
679 * @adap_fd: Previously obtained adapter fd associated with CXL context.
680 * @file: Previously obtained file associated with CXL context.
681 * @perms: User-specified permissions.
682 *
683 * The context's mutex is locked when an allocated context is returned.
684 *
685 * Return: Allocated context on success, NULL on failure
686 */
687 static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
688 struct cxl_context *ctx, int ctxid,
689 int adap_fd, struct file *file,
690 u32 perms)
691 {
692 struct device *dev = &cfg->dev->dev;
693 struct afu *afu = cfg->afu;
694 struct ctx_info *ctxi = NULL;
695 struct llun_info **lli = NULL;
696 struct sisl_rht_entry *rhte;
697
698 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
699 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
700 if (unlikely(!ctxi || !lli)) {
701 dev_err(dev, "%s: Unable to allocate context!\n", __func__);
702 goto err;
703 }
704
705 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
706 if (unlikely(!rhte)) {
707 dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
708 goto err;
709 }
710
711 ctxi->rht_lun = lli;
712 ctxi->rht_start = rhte;
713 ctxi->rht_perms = perms;
714
715 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
716 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
717 ctxi->lfd = adap_fd;
718 ctxi->pid = current->tgid; /* tgid = pid */
719 ctxi->ctx = ctx;
720 ctxi->file = file;
721 mutex_init(&ctxi->mutex);
722 INIT_LIST_HEAD(&ctxi->luns);
723 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
724
725 atomic_inc(&cfg->num_user_contexts);
726 mutex_lock(&ctxi->mutex);
727 out:
728 return ctxi;
729
730 err:
731 kfree(lli);
732 kfree(ctxi);
733 ctxi = NULL;
734 goto out;
735 }
736
737 /**
738 * _cxlflash_disk_detach() - detaches a LUN from a context
739 * @sdev: SCSI device associated with LUN.
740 * @ctxi: Context owning resources.
741 * @detach: Detach ioctl data structure.
742 *
743 * As part of the detach, all per-context resources associated with the LUN
744 * are cleaned up. When detaching the last LUN for a context, the context
745 * itself is cleaned up and released.
746 *
747 * Return: 0 on success, -errno on failure
748 */
749 static int _cxlflash_disk_detach(struct scsi_device *sdev,
750 struct ctx_info *ctxi,
751 struct dk_cxlflash_detach *detach)
752 {
753 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
754 struct device *dev = &cfg->dev->dev;
755 struct llun_info *lli = sdev->hostdata;
756 struct lun_access *lun_access, *t;
757 struct dk_cxlflash_release rel;
758 bool put_ctx = false;
759
760 int i;
761 int rc = 0;
762 int lfd;
763 u64 ctxid = DECODE_CTXID(detach->context_id),
764 rctxid = detach->context_id;
765
766 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
767
768 if (!ctxi) {
769 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
770 if (unlikely(!ctxi)) {
771 dev_dbg(dev, "%s: Bad context! (%llu)\n",
772 __func__, ctxid);
773 rc = -EINVAL;
774 goto out;
775 }
776
777 put_ctx = true;
778 }
779
780 /* Cleanup outstanding resources tied to this LUN */
781 if (ctxi->rht_out) {
782 marshal_det_to_rele(detach, &rel);
783 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
784 if (ctxi->rht_lun[i] == lli) {
785 rel.rsrc_handle = i;
786 _cxlflash_disk_release(sdev, ctxi, &rel);
787 }
788
789 /* No need to loop further if we're done */
790 if (ctxi->rht_out == 0)
791 break;
792 }
793 }
794
795 /* Take our LUN out of context, free the node */
796 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
797 if (lun_access->lli == lli) {
798 list_del(&lun_access->list);
799 kfree(lun_access);
800 lun_access = NULL;
801 break;
802 }
803
804 /* Tear down context following last LUN cleanup */
805 if (list_empty(&ctxi->luns)) {
806 ctxi->unavail = true;
807 mutex_unlock(&ctxi->mutex);
808 mutex_lock(&cfg->ctx_tbl_list_mutex);
809 mutex_lock(&ctxi->mutex);
810
811 /* Might not have been in error list so conditionally remove */
812 if (!list_empty(&ctxi->list))
813 list_del(&ctxi->list);
814 cfg->ctx_tbl[ctxid] = NULL;
815 mutex_unlock(&cfg->ctx_tbl_list_mutex);
816 mutex_unlock(&ctxi->mutex);
817
818 lfd = ctxi->lfd;
819 destroy_context(cfg, ctxi);
820 ctxi = NULL;
821 put_ctx = false;
822
823 /*
824 * As a last step, clean up external resources when not
825 * already on an external cleanup thread, i.e.: close(adap_fd).
826 *
827 * NOTE: this will free up the context from the CXL services,
828 * allowing it to dole out the same context_id on a future
829 * (or even currently in-flight) disk_attach operation.
830 */
831 if (lfd != -1)
832 sys_close(lfd);
833 }
834
835 out:
836 if (put_ctx)
837 put_context(ctxi);
838 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
839 return rc;
840 }
841
842 static int cxlflash_disk_detach(struct scsi_device *sdev,
843 struct dk_cxlflash_detach *detach)
844 {
845 return _cxlflash_disk_detach(sdev, NULL, detach);
846 }
847
848 /**
849 * cxlflash_cxl_release() - release handler for adapter file descriptor
850 * @inode: File-system inode associated with fd.
851 * @file: File installed with adapter file descriptor.
852 *
853 * This routine is the release handler for the fops registered with
854 * the CXL services on an initial attach for a context. It is called
855 * when a close is performed on the adapter file descriptor returned
856 * to the user. Programmatically, the user is not required to perform
857 * the close, as it is handled internally via the detach ioctl when
858 * a context is being removed. Note that nothing prevents the user
859 * from performing a close, but the user should be aware that doing
860 * so is considered catastrophic and subsequent usage of the superpipe
861 * API with previously saved off tokens will fail.
862 *
863 * When initiated from an external close (either by the user or via
864 * a process tear down), the routine derives the context reference
865 * and calls detach for each LUN associated with the context. The
866 * final detach operation will cause the context itself to be freed.
867 * Note that the saved off lfd is reset prior to calling detach to
868 * signify that the final detach should not perform a close.
869 *
870 * When initiated from a detach operation as part of the tear down
871 * of a context, the context is first completely freed and then the
872 * close is performed. This routine will fail to derive the context
873 * reference (due to the context having already been freed) and then
874 * call into the CXL release entry point.
875 *
876 * Thus, with exception to when the CXL process element (context id)
877 * lookup fails (a case that should theoretically never occur), every
878 * call into this routine results in a complete freeing of a context.
879 *
880 * As part of the detach, all per-context resources associated with the LUN
881 * are cleaned up. When detaching the last LUN for a context, the context
882 * itself is cleaned up and released.
883 *
884 * Return: 0 on success
885 */
886 static int cxlflash_cxl_release(struct inode *inode, struct file *file)
887 {
888 struct cxl_context *ctx = cxl_fops_get_context(file);
889 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
890 cxl_fops);
891 struct device *dev = &cfg->dev->dev;
892 struct ctx_info *ctxi = NULL;
893 struct dk_cxlflash_detach detach = { { 0 }, 0 };
894 struct lun_access *lun_access, *t;
895 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
896 int ctxid;
897
898 ctxid = cxl_process_element(ctx);
899 if (unlikely(ctxid < 0)) {
900 dev_err(dev, "%s: Context %p was closed! (%d)\n",
901 __func__, ctx, ctxid);
902 goto out;
903 }
904
905 ctxi = get_context(cfg, ctxid, file, ctrl);
906 if (unlikely(!ctxi)) {
907 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
908 if (!ctxi) {
909 dev_dbg(dev, "%s: Context %d already free!\n",
910 __func__, ctxid);
911 goto out_release;
912 }
913
914 dev_dbg(dev, "%s: Another process owns context %d!\n",
915 __func__, ctxid);
916 put_context(ctxi);
917 goto out;
918 }
919
920 dev_dbg(dev, "%s: close(%d) for context %d\n",
921 __func__, ctxi->lfd, ctxid);
922
923 /* Reset the file descriptor to indicate we're on a close() thread */
924 ctxi->lfd = -1;
925 detach.context_id = ctxi->ctxid;
926 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
927 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
928 out_release:
929 cxl_fd_release(inode, file);
930 out:
931 dev_dbg(dev, "%s: returning\n", __func__);
932 return 0;
933 }
934
935 /**
936 * unmap_context() - clears a previously established mapping
937 * @ctxi: Context owning the mapping.
938 *
939 * This routine is used to switch between the error notification page
940 * (dummy page of all 1's) and the real mapping (established by the CXL
941 * fault handler).
942 */
943 static void unmap_context(struct ctx_info *ctxi)
944 {
945 unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
946 }
947
948 /**
949 * get_err_page() - obtains and allocates the error notification page
950 *
951 * Return: error notification page on success, NULL on failure
952 */
953 static struct page *get_err_page(void)
954 {
955 struct page *err_page = global.err_page;
956
957 if (unlikely(!err_page)) {
958 err_page = alloc_page(GFP_KERNEL);
959 if (unlikely(!err_page)) {
960 pr_err("%s: Unable to allocate err_page!\n", __func__);
961 goto out;
962 }
963
964 memset(page_address(err_page), -1, PAGE_SIZE);
965
966 /* Serialize update w/ other threads to avoid a leak */
967 mutex_lock(&global.mutex);
968 if (likely(!global.err_page))
969 global.err_page = err_page;
970 else {
971 __free_page(err_page);
972 err_page = global.err_page;
973 }
974 mutex_unlock(&global.mutex);
975 }
976
977 out:
978 pr_debug("%s: returning err_page=%p\n", __func__, err_page);
979 return err_page;
980 }
981
982 /**
983 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
984 * @vma: VM area associated with mapping.
985 * @vmf: VM fault associated with current fault.
986 *
987 * To support error notification via MMIO, faults are 'caught' by this routine
988 * that was inserted before passing back the adapter file descriptor on attach.
989 * When a fault occurs, this routine evaluates if error recovery is active and
990 * if so, installs the error page to 'notify' the user about the error state.
991 * During normal operation, the fault is simply handled by the original fault
992 * handler that was installed by CXL services as part of initializing the
993 * adapter file descriptor. The VMA's page protection bits are toggled to
994 * indicate cached/not-cached depending on the memory backing the fault.
995 *
996 * Return: 0 on success, VM_FAULT_SIGBUS on failure
997 */
998 static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
999 {
1000 struct file *file = vma->vm_file;
1001 struct cxl_context *ctx = cxl_fops_get_context(file);
1002 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1003 cxl_fops);
1004 struct device *dev = &cfg->dev->dev;
1005 struct ctx_info *ctxi = NULL;
1006 struct page *err_page = NULL;
1007 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1008 int rc = 0;
1009 int ctxid;
1010
1011 ctxid = cxl_process_element(ctx);
1012 if (unlikely(ctxid < 0)) {
1013 dev_err(dev, "%s: Context %p was closed! (%d)\n",
1014 __func__, ctx, ctxid);
1015 goto err;
1016 }
1017
1018 ctxi = get_context(cfg, ctxid, file, ctrl);
1019 if (unlikely(!ctxi)) {
1020 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
1021 goto err;
1022 }
1023
1024 dev_dbg(dev, "%s: fault(%d) for context %d\n",
1025 __func__, ctxi->lfd, ctxid);
1026
1027 if (likely(!ctxi->err_recovery_active)) {
1028 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1029 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
1030 } else {
1031 dev_dbg(dev, "%s: err recovery active, use err_page!\n",
1032 __func__);
1033
1034 err_page = get_err_page();
1035 if (unlikely(!err_page)) {
1036 dev_err(dev, "%s: Could not obtain error page!\n",
1037 __func__);
1038 rc = VM_FAULT_RETRY;
1039 goto out;
1040 }
1041
1042 get_page(err_page);
1043 vmf->page = err_page;
1044 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
1045 }
1046
1047 out:
1048 if (likely(ctxi))
1049 put_context(ctxi);
1050 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1051 return rc;
1052
1053 err:
1054 rc = VM_FAULT_SIGBUS;
1055 goto out;
1056 }
1057
1058 /*
1059 * Local MMAP vmops to 'catch' faults
1060 */
1061 static const struct vm_operations_struct cxlflash_mmap_vmops = {
1062 .fault = cxlflash_mmap_fault,
1063 };
1064
1065 /**
1066 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
1067 * @file: File installed with adapter file descriptor.
1068 * @vma: VM area associated with mapping.
1069 *
1070 * Installs local mmap vmops to 'catch' faults for error notification support.
1071 *
1072 * Return: 0 on success, -errno on failure
1073 */
1074 static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1075 {
1076 struct cxl_context *ctx = cxl_fops_get_context(file);
1077 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1078 cxl_fops);
1079 struct device *dev = &cfg->dev->dev;
1080 struct ctx_info *ctxi = NULL;
1081 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1082 int ctxid;
1083 int rc = 0;
1084
1085 ctxid = cxl_process_element(ctx);
1086 if (unlikely(ctxid < 0)) {
1087 dev_err(dev, "%s: Context %p was closed! (%d)\n",
1088 __func__, ctx, ctxid);
1089 rc = -EIO;
1090 goto out;
1091 }
1092
1093 ctxi = get_context(cfg, ctxid, file, ctrl);
1094 if (unlikely(!ctxi)) {
1095 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
1096 rc = -EIO;
1097 goto out;
1098 }
1099
1100 dev_dbg(dev, "%s: mmap(%d) for context %d\n",
1101 __func__, ctxi->lfd, ctxid);
1102
1103 rc = cxl_fd_mmap(file, vma);
1104 if (likely(!rc)) {
1105 /* Insert ourself in the mmap fault handler path */
1106 ctxi->cxl_mmap_vmops = vma->vm_ops;
1107 vma->vm_ops = &cxlflash_mmap_vmops;
1108 }
1109
1110 out:
1111 if (likely(ctxi))
1112 put_context(ctxi);
1113 return rc;
1114 }
1115
1116 /*
1117 * Local fops for adapter file descriptor
1118 */
1119 static const struct file_operations cxlflash_cxl_fops = {
1120 .owner = THIS_MODULE,
1121 .mmap = cxlflash_cxl_mmap,
1122 .release = cxlflash_cxl_release,
1123 };
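/*
 * From user space, the error page manifests as all-1s data in the mapped
 * MMIO area. A hypothetical user-space detection sketch (not part of this
 * driver; adap_fd and mmio_size come from the attach ioctl below):
 */
#if 0
	volatile __u64 *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, adap_fd, 0);

	if (mmio != MAP_FAILED && mmio[0] == ~0ULL) {
		/* AFU likely in error state: issue DK_CXLFLASH_RECOVER_AFU */
	}
#endif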
1124
1125 /**
1126 * cxlflash_mark_contexts_error() - move contexts to error state and list
1127 * @cfg: Internal structure associated with the host.
1128 *
1129 * A context is only moved over to the error list when there are no outstanding
1130 * references to it. This ensures that a running operation has completed.
1131 *
1132 * Return: 0 on success, -errno on failure
1133 */
1134 int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
1135 {
1136 int i, rc = 0;
1137 struct ctx_info *ctxi = NULL;
1138
1139 mutex_lock(&cfg->ctx_tbl_list_mutex);
1140
1141 for (i = 0; i < MAX_CONTEXT; i++) {
1142 ctxi = cfg->ctx_tbl[i];
1143 if (ctxi) {
1144 mutex_lock(&ctxi->mutex);
1145 cfg->ctx_tbl[i] = NULL;
1146 list_add(&ctxi->list, &cfg->ctx_err_recovery);
1147 ctxi->err_recovery_active = true;
1148 ctxi->ctrl_map = NULL;
1149 unmap_context(ctxi);
1150 mutex_unlock(&ctxi->mutex);
1151 }
1152 }
1153
1154 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1155 return rc;
1156 }
1157
1158 /*
1159 * Dummy NULL fops
1160 */
1161 static const struct file_operations null_fops = {
1162 .owner = THIS_MODULE,
1163 };
1164
1165 /**
1166 * cxlflash_disk_attach() - attach a LUN to a context
1167 * @sdev: SCSI device associated with LUN.
1168 * @attach: Attach ioctl data structure.
1169 *
1170 * Creates a context and attaches LUN to it. A LUN can only be attached
1171 * one time to a context (subsequent attaches for the same context/LUN pair
1172 * are not supported). Additional LUNs can be attached to a context by
1173 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
1174 *
1175 * Return: 0 on success, -errno on failure
1176 */
1177 static int cxlflash_disk_attach(struct scsi_device *sdev,
1178 struct dk_cxlflash_attach *attach)
1179 {
1180 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1181 struct device *dev = &cfg->dev->dev;
1182 struct afu *afu = cfg->afu;
1183 struct llun_info *lli = sdev->hostdata;
1184 struct glun_info *gli = lli->parent;
1185 struct cxl_ioctl_start_work *work;
1186 struct ctx_info *ctxi = NULL;
1187 struct lun_access *lun_access = NULL;
1188 int rc = 0;
1189 u32 perms;
1190 int ctxid = -1;
1191 u64 rctxid = 0UL;
1192 struct file *file;
1193
1194 struct cxl_context *ctx;
1195
1196 int fd = -1;
1197
1198 /* On first attach set fileops */
1199 if (atomic_read(&cfg->num_user_contexts) == 0)
1200 cfg->cxl_fops = cxlflash_cxl_fops;
1201
1202 if (attach->num_interrupts > 4) {
1203 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1204 __func__, attach->num_interrupts);
1205 rc = -EINVAL;
1206 goto out;
1207 }
1208
1209 if (gli->max_lba == 0) {
1210 dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
1211 __func__, lli->lun_id[sdev->channel]);
1212 rc = read_cap16(sdev, lli);
1213 if (rc) {
1214 dev_err(dev, "%s: Invalid device! (%d)\n",
1215 __func__, rc);
1216 rc = -ENODEV;
1217 goto out;
1218 }
1219 dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
1220 dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
1221 }
1222
1223 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
1224 rctxid = attach->context_id;
1225 ctxi = get_context(cfg, rctxid, NULL, 0);
1226 if (!ctxi) {
1227 dev_dbg(dev, "%s: Bad context! (%016llX)\n",
1228 __func__, rctxid);
1229 rc = -EINVAL;
1230 goto out;
1231 }
1232
1233 list_for_each_entry(lun_access, &ctxi->luns, list)
1234 if (lun_access->lli == lli) {
1235 dev_dbg(dev, "%s: Already attached!\n",
1236 __func__);
1237 rc = -EINVAL;
1238 goto out;
1239 }
1240 }
1241
1242 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
1243 if (unlikely(!lun_access)) {
1244 dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
1245 rc = -ENOMEM;
1246 goto out;
1247 }
1248
1249 lun_access->lli = lli;
1250 lun_access->sdev = sdev;
1251
1252 /* Non-NULL context indicates reuse */
1253 if (ctxi) {
1254 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
1255 __func__, rctxid);
1256 list_add(&lun_access->list, &ctxi->luns);
1257 fd = ctxi->lfd;
1258 goto out_attach;
1259 }
1260
1261 ctx = cxl_dev_context_init(cfg->dev);
1262 if (unlikely(IS_ERR_OR_NULL(ctx))) {
1263 dev_err(dev, "%s: Could not initialize context %p\n",
1264 __func__, ctx);
1265 rc = -ENODEV;
1266 goto err0;
1267 }
1268
1269 ctxid = cxl_process_element(ctx);
1270 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1271 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1272 rc = -EPERM;
1273 goto err1;
1274 }
1275
1276 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1277 if (unlikely(fd < 0)) {
1278 rc = -ENODEV;
1279 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1280 goto err1;
1281 }
1282
1283 /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
1284 perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1285
1286 ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
1287 if (unlikely(!ctxi)) {
1288 dev_err(dev, "%s: Failed to create context! (%d)\n",
1289 __func__, ctxid);
1290 goto err2;
1291 }
1292
1293 work = &ctxi->work;
1294 work->num_interrupts = attach->num_interrupts;
1295 work->flags = CXL_START_WORK_NUM_IRQS;
1296
1297 rc = cxl_start_work(ctx, work);
1298 if (unlikely(rc)) {
1299 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1300 __func__, rc);
1301 goto err3;
1302 }
1303
1304 rc = afu_attach(cfg, ctxi);
1305 if (unlikely(rc)) {
1306 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1307 goto err4;
1308 }
1309
1310 /*
1311 * No error paths after this point. Once the fd is installed it's
1312 * visible to user space and can't be undone safely on this thread.
1313 * There is no need to worry about a deadlock here because no one
1314 * knows about us yet; we can be the only one holding our mutex.
1315 */
1316 list_add(&lun_access->list, &ctxi->luns);
1317 mutex_unlock(&ctxi->mutex);
1318 mutex_lock(&cfg->ctx_tbl_list_mutex);
1319 mutex_lock(&ctxi->mutex);
1320 cfg->ctx_tbl[ctxid] = ctxi;
1321 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1322 fd_install(fd, file);
1323
1324 out_attach:
1325 attach->hdr.return_flags = 0;
1326 attach->context_id = ctxi->ctxid;
1327 attach->block_size = gli->blk_len;
1328 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1329 attach->last_lba = gli->max_lba;
1330 attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
1331
1332 out:
1333 attach->adap_fd = fd;
1334
1335 if (ctxi)
1336 put_context(ctxi);
1337
1338 dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
1339 __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
1340 return rc;
1341
1342 err4:
1343 cxl_stop_context(ctx);
1344 err3:
1345 put_context(ctxi);
1346 destroy_context(cfg, ctxi);
1347 ctxi = NULL;
1348 err2:
1349 /*
1350 * Here, we're overriding the fops with a dummy all-NULL fops because
1351 * fput() calls the release fop, which will cause us to mistakenly
1352 * call into the CXL code. Rather than try to add yet more complexity
1353 * to that routine (cxlflash_cxl_release) we should try to fix the
1354 * issue here.
1355 */
1356 file->f_op = &null_fops;
1357 fput(file);
1358 put_unused_fd(fd);
1359 fd = -1;
1360 err1:
1361 cxl_release_context(ctx);
1362 err0:
1363 kfree(lun_access);
1364 goto out;
1365 }
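/*
 * A minimal user-space sketch of the attach flow (example_attach and sg_fd
 * are hypothetical; error handling elided):
 */
#if 0
static int example_attach(int sg_fd, struct dk_cxlflash_attach *attach)
{
	memset(attach, 0, sizeof(*attach));
	attach->hdr.version = DK_CXLFLASH_VERSION_0;
	attach->num_interrupts = 4;

	/*
	 * On success: attach->context_id identifies this context on
	 * subsequent ioctls, attach->adap_fd is mmap()'d for MMIO, and
	 * attach->last_lba/attach->block_size describe the LUN.
	 */
	return ioctl(sg_fd, DK_CXLFLASH_ATTACH, attach);
}
#endif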
1366
1367 /**
1368 * recover_context() - recovers a context in error
1369 * @cfg: Internal structure associated with the host.
1370 * @ctxi: Context to release.
1371 *
1372 * Re-establishes the state for a context-in-error.
1373 *
1374 * Return: 0 on success, -errno on failure
1375 */
1376 static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
1377 {
1378 struct device *dev = &cfg->dev->dev;
1379 int rc = 0;
1380 int old_fd, fd = -1;
1381 int ctxid = -1;
1382 struct file *file;
1383 struct cxl_context *ctx;
1384 struct afu *afu = cfg->afu;
1385
1386 ctx = cxl_dev_context_init(cfg->dev);
1387 if (unlikely(IS_ERR_OR_NULL(ctx))) {
1388 dev_err(dev, "%s: Could not initialize context %p\n",
1389 __func__, ctx);
1390 rc = -ENODEV;
1391 goto out;
1392 }
1393
1394 ctxid = cxl_process_element(ctx);
1395 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1396 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1397 rc = -EPERM;
1398 goto err1;
1399 }
1400
1401 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1402 if (unlikely(fd < 0)) {
1403 rc = -ENODEV;
1404 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1405 goto err1;
1406 }
1407
1408 rc = cxl_start_work(ctx, &ctxi->work);
1409 if (unlikely(rc)) {
1410 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1411 __func__, rc);
1412 goto err2;
1413 }
1414
1415 /* Update with new MMIO area based on updated context id */
1416 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
1417
1418 rc = afu_attach(cfg, ctxi);
1419 if (rc) {
1420 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1421 goto err3;
1422 }
1423
1424 /*
1425 * No error paths after this point. Once the fd is installed it's
1426 * visible to user space and can't be undone safely on this thread.
1427 */
1428 old_fd = ctxi->lfd;
1429 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1430 ctxi->lfd = fd;
1431 ctxi->ctx = ctx;
1432 ctxi->file = file;
1433
1434 /*
1435 * Put context back in table (note the reinit of the context list);
1436 * we must first drop the context's mutex and then acquire it in
1437 * order with the table/list mutex to avoid a deadlock - safe to do
1438 * here because no one can find us at this moment in time.
1439 */
1440 mutex_unlock(&ctxi->mutex);
1441 mutex_lock(&cfg->ctx_tbl_list_mutex);
1442 mutex_lock(&ctxi->mutex);
1443 list_del_init(&ctxi->list);
1444 cfg->ctx_tbl[ctxid] = ctxi;
1445 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1446 fd_install(fd, file);
1447
1448 /* Release the original adapter fd and associated CXL resources */
1449 sys_close(old_fd);
1450 out:
1451 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1452 __func__, ctxid, fd, rc);
1453 return rc;
1454
1455 err3:
1456 cxl_stop_context(ctx);
1457 err2:
1458 fput(file);
1459 put_unused_fd(fd);
1460 err1:
1461 cxl_release_context(ctx);
1462 goto out;
1463 }
1464
1465 /**
1466 * check_state() - checks and responds to the current adapter state
1467 * @cfg: Internal structure associated with the host.
1468 *
1469 * This routine can block and should only be used in process context.
1470 * Note that when waking up from waiting in limbo, the state is unknown
1471 * and must be checked again before proceeding.
1472 *
1473 * Return: 0 on success, -errno on failure
1474 */
1475 static int check_state(struct cxlflash_cfg *cfg)
1476 {
1477 struct device *dev = &cfg->dev->dev;
1478 int rc = 0;
1479
1480 retry:
1481 switch (cfg->state) {
1482 case STATE_LIMBO:
1483 dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
1484 rc = wait_event_interruptible(cfg->limbo_waitq,
1485 cfg->state != STATE_LIMBO);
1486 if (unlikely(rc))
1487 break;
1488 goto retry;
1489 case STATE_FAILTERM:
1490 dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
1491 rc = -ENODEV;
1492 break;
1493 default:
1494 break;
1495 }
1496
1497 return rc;
1498 }
1499
1500 /**
1501 * cxlflash_afu_recover() - initiates AFU recovery
1502 * @sdev: SCSI device associated with LUN.
1503 * @recover: Recover ioctl data structure.
1504 *
1505 * Only a single recovery is allowed at a time to avoid exhausting CXL
1506 * resources (leading to recovery failure) in the event that we're up
1507 * against the maximum number of contexts limit. For similar reasons,
1508 * a context recovery is retried if there are multiple recoveries taking
1509 * place at the same time and the failure was due to CXL services being
1510 * unable to keep up.
1511 *
1512 * Because a user can detect an error condition before the kernel, it is
1513 * quite possible for this routine to act as the kernel's EEH detection
1514 * source (MMIO read of mbox_r). Because of this, there is a window of
1515 * time where an EEH might have been detected but not yet 'serviced'
1516 * (callback invoked, causing the device to enter limbo state). To avoid
1517 * looping in this routine during that window, a 1 second sleep is in place
1518 * between the time the MMIO failure is detected and the time a wait on the
1519 * limbo wait queue is attempted via check_state().
1520 *
1521 * Return: 0 on success, -errno on failure
1522 */
1523 static int cxlflash_afu_recover(struct scsi_device *sdev,
1524 struct dk_cxlflash_recover_afu *recover)
1525 {
1526 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1527 struct device *dev = &cfg->dev->dev;
1528 struct llun_info *lli = sdev->hostdata;
1529 struct afu *afu = cfg->afu;
1530 struct ctx_info *ctxi = NULL;
1531 struct mutex *mutex = &cfg->ctx_recovery_mutex;
1532 u64 ctxid = DECODE_CTXID(recover->context_id),
1533 rctxid = recover->context_id;
1534 long reg;
1535 int lretry = 20; /* up to 2 seconds */
1536 int rc = 0;
1537
1538 atomic_inc(&cfg->recovery_threads);
1539 rc = mutex_lock_interruptible(mutex);
1540 if (rc)
1541 goto out;
1542
1543 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
1544 __func__, recover->reason, rctxid);
1545
1546 retry:
1547 /* Ensure that this process is attached to the context */
1548 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1549 if (unlikely(!ctxi)) {
1550 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1551 rc = -EINVAL;
1552 goto out;
1553 }
1554
1555 if (ctxi->err_recovery_active) {
1556 retry_recover:
1557 rc = recover_context(cfg, ctxi);
1558 if (unlikely(rc)) {
1559 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
1560 __func__, ctxid, rc);
1561 if ((rc == -ENODEV) &&
1562 ((atomic_read(&cfg->recovery_threads) > 1) ||
1563 (lretry--))) {
1564 dev_dbg(dev, "%s: Going to try again!\n",
1565 __func__);
1566 mutex_unlock(mutex);
1567 msleep(100);
1568 rc = mutex_lock_interruptible(mutex);
1569 if (rc)
1570 goto out;
1571 goto retry_recover;
1572 }
1573
1574 goto out;
1575 }
1576
1577 ctxi->err_recovery_active = false;
1578 recover->context_id = ctxi->ctxid;
1579 recover->adap_fd = ctxi->lfd;
1580 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1581 recover->hdr.return_flags |=
1582 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1583 goto out;
1584 }
1585
1586 /* Test if in error state */
1587 reg = readq_be(&afu->ctrl_map->mbox_r);
1588 if (reg == -1) {
1589 dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
1590 __func__);
1591 mutex_unlock(&ctxi->mutex);
1592 ctxi = NULL;
1593 ssleep(1);
1594 rc = check_state(cfg);
1595 if (unlikely(rc))
1596 goto out;
1597 goto retry;
1598 }
1599
1600 dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
1601 out:
1602 if (likely(ctxi))
1603 put_context(ctxi);
1604 mutex_unlock(mutex);
1605 atomic_dec_if_positive(&cfg->recovery_threads);
1606 return rc;
1607 }
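/*
 * The user-space half of the recovery handshake, sketched (ctx_token is
 * the hypothetical saved context_id from attach; error handling elided):
 */
#if 0
	struct dk_cxlflash_recover_afu recover = { { 0 } };

	recover.hdr.version = DK_CXLFLASH_VERSION_0;
	recover.context_id = ctx_token;

	if (ioctl(sg_fd, DK_CXLFLASH_RECOVER_AFU, &recover) == 0 &&
	    (recover.hdr.return_flags &
	     DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET)) {
		/*
		 * Context was reset and reattached: save the new
		 * recover.context_id/recover.adap_fd and re-mmap MMIO
		 * before resuming I/O.
		 */
	}
#endif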
1608
1609 /**
1610 * process_sense() - evaluates and processes sense data
1611 * @sdev: SCSI device associated with LUN.
1612 * @verify: Verify ioctl data structure.
1613 *
1614 * Return: 0 on success, -errno on failure
1615 */
1616 static int process_sense(struct scsi_device *sdev,
1617 struct dk_cxlflash_verify *verify)
1618 {
1619 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1620 struct device *dev = &cfg->dev->dev;
1621 struct llun_info *lli = sdev->hostdata;
1622 struct glun_info *gli = lli->parent;
1623 u64 prev_lba = gli->max_lba;
1624 struct scsi_sense_hdr sshdr = { 0 };
1625 int rc = 0;
1626
1627 rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1628 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1629 if (!rc) {
1630 dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
1631 rc = -EINVAL;
1632 goto out;
1633 }
1634
1635 switch (sshdr.sense_key) {
1636 case NO_SENSE:
1637 case RECOVERED_ERROR:
1638 /* fall through */
1639 case NOT_READY:
1640 break;
1641 case UNIT_ATTENTION:
1642 switch (sshdr.asc) {
1643 case 0x29: /* Power on Reset or Device Reset */
1644 /* fall through */
1645 case 0x2A: /* Device settings/capacity changed */
1646 rc = read_cap16(sdev, lli);
1647 if (rc) {
1648 rc = -ENODEV;
1649 break;
1650 }
1651 if (prev_lba != gli->max_lba)
1652 dev_dbg(dev, "%s: Capacity changed old=%lld "
1653 "new=%lld\n", __func__, prev_lba,
1654 gli->max_lba);
1655 break;
1656 case 0x3F: /* Report LUNs changed, Rescan. */
1657 scsi_scan_host(cfg->host);
1658 break;
1659 default:
1660 rc = -EIO;
1661 break;
1662 }
1663 break;
1664 default:
1665 rc = -EIO;
1666 break;
1667 }
1668 out:
1669 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
1670 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
1671 return rc;
1672 }
1673
1674 /**
1675 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
1676 * @sdev: SCSI device associated with LUN.
1677 * @verify: Verify ioctl data structure.
1678 *
1679 * Return: 0 on success, -errno on failure
1680 */
1681 static int cxlflash_disk_verify(struct scsi_device *sdev,
1682 struct dk_cxlflash_verify *verify)
1683 {
1684 int rc = 0;
1685 struct ctx_info *ctxi = NULL;
1686 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1687 struct device *dev = &cfg->dev->dev;
1688 struct llun_info *lli = sdev->hostdata;
1689 struct glun_info *gli = lli->parent;
1690 struct sisl_rht_entry *rhte = NULL;
1691 res_hndl_t rhndl = verify->rsrc_handle;
1692 u64 ctxid = DECODE_CTXID(verify->context_id),
1693 rctxid = verify->context_id;
1694 u64 last_lba = 0;
1695
1696 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
1697 "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
1698 verify->hint, verify->hdr.flags);
1699
1700 ctxi = get_context(cfg, rctxid, lli, 0);
1701 if (unlikely(!ctxi)) {
1702 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1703 rc = -EINVAL;
1704 goto out;
1705 }
1706
1707 rhte = get_rhte(ctxi, rhndl, lli);
1708 if (unlikely(!rhte)) {
1709 dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
1710 __func__, rhndl);
1711 rc = -EINVAL;
1712 goto out;
1713 }
1714
1715 /*
1716 * Look at the hint/sense to see if it requires us to redrive
1717 * inquiry (i.e. the Unit attention is due to the WWN changing).
1718 */
1719 if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
1720 rc = process_sense(sdev, verify);
1721 if (unlikely(rc)) {
1722 dev_err(dev, "%s: Failed to validate sense data (%d)\n",
1723 __func__, rc);
1724 goto out;
1725 }
1726 }
1727
1728 switch (gli->mode) {
1729 case MODE_PHYSICAL:
1730 last_lba = gli->max_lba;
1731 break;
1732 default:
1733 WARN(1, "Unsupported LUN mode!");
1734 }
1735
1736 verify->last_lba = last_lba;
1737
1738 out:
1739 if (likely(ctxi))
1740 put_context(ctxi);
1741 dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
1742 __func__, rc, verify->last_lba);
1743 return rc;
1744 }
1745
1746 /**
1747 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
1748 * @cmd: The ioctl command to decode.
1749 *
1750 * Return: A string identifying the decoded ioctl.
1751 */
1752 static char *decode_ioctl(int cmd)
1753 {
1754 switch (cmd) {
1755 case DK_CXLFLASH_ATTACH:
1756 return __stringify_1(DK_CXLFLASH_ATTACH);
1757 case DK_CXLFLASH_USER_DIRECT:
1758 return __stringify_1(DK_CXLFLASH_USER_DIRECT);
1759 case DK_CXLFLASH_RELEASE:
1760 return __stringify_1(DK_CXLFLASH_RELEASE);
1761 case DK_CXLFLASH_DETACH:
1762 return __stringify_1(DK_CXLFLASH_DETACH);
1763 case DK_CXLFLASH_VERIFY:
1764 return __stringify_1(DK_CXLFLASH_VERIFY);
1765 case DK_CXLFLASH_RECOVER_AFU:
1766 return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
1767 case DK_CXLFLASH_MANAGE_LUN:
1768 return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
1769 }
1770
1771 return "UNKNOWN";
1772 }
1773
1774 /**
1775 * cxlflash_disk_direct_open() - opens a direct (physical) disk
1776 * @sdev: SCSI device associated with LUN.
1777 * @arg: UDirect ioctl data structure.
1778 *
1779 * On successful return, the user is informed of the resource handle
1780 * to be used to identify the direct lun and the size (in blocks) of
1781 * the direct lun in last LBA format.
1782 *
1783 * Return: 0 on success, -errno on failure
1784 */
1785 static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1786 {
1787 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1788 struct device *dev = &cfg->dev->dev;
1789 struct afu *afu = cfg->afu;
1790 struct llun_info *lli = sdev->hostdata;
1791 struct glun_info *gli = lli->parent;
1792
1793 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
1794
1795 u64 ctxid = DECODE_CTXID(pphys->context_id),
1796 rctxid = pphys->context_id;
1797 u64 lun_size = 0;
1798 u64 last_lba = 0;
1799 u64 rsrc_handle = -1;
1800 u32 port = CHAN2PORT(sdev->channel);
1801
1802 int rc = 0;
1803
1804 struct ctx_info *ctxi = NULL;
1805 struct sisl_rht_entry *rhte = NULL;
1806
1807 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
1808
1809 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1810 if (unlikely(rc)) {
1811 dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
1812 __func__);
1813 goto out;
1814 }
1815
1816 ctxi = get_context(cfg, rctxid, lli, 0);
1817 if (unlikely(!ctxi)) {
1818 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1819 rc = -EINVAL;
1820 goto err1;
1821 }
1822
1823 rhte = rhte_checkout(ctxi, lli);
1824 if (unlikely(!rhte)) {
1825 dev_dbg(dev, "%s: too many opens for this context\n", __func__);
1826 rc = -EMFILE; /* too many opens */
1827 goto err1;
1828 }
1829
1830 rsrc_handle = (rhte - ctxi->rht_start);
1831
1832 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
1833 cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
1834
1835 last_lba = gli->max_lba;
1836 pphys->hdr.return_flags = 0;
1837 pphys->last_lba = last_lba;
1838 pphys->rsrc_handle = rsrc_handle;
1839
1840 out:
1841 if (likely(ctxi))
1842 put_context(ctxi);
1843 dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
1844 __func__, rsrc_handle, rc, last_lba);
1845 return rc;
1846
1847 err1:
1848 cxlflash_lun_detach(gli);
1849 goto out;
1850 }
1851
1852 /**
1853 * ioctl_common() - common IOCTL handler for driver
1854 * @sdev: SCSI device associated with LUN.
1855 * @cmd: IOCTL command.
1856 *
1857 * Handles common fencing operations that are valid for multiple ioctls. Always
1858 * allow through ioctls that are cleanup oriented in nature, even when operating
1859 * in a failed/terminating state.
1860 *
1861 * Return: 0 on success, -errno on failure
1862 */
1863 static int ioctl_common(struct scsi_device *sdev, int cmd)
1864 {
1865 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1866 struct device *dev = &cfg->dev->dev;
1867 struct llun_info *lli = sdev->hostdata;
1868 int rc = 0;
1869
1870 if (unlikely(!lli)) {
1871 dev_dbg(dev, "%s: Unknown LUN\n", __func__);
1872 rc = -EINVAL;
1873 goto out;
1874 }
1875
1876 rc = check_state(cfg);
1877 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
1878 switch (cmd) {
1879 case DK_CXLFLASH_RELEASE:
1880 case DK_CXLFLASH_DETACH:
1881 dev_dbg(dev, "%s: Command override! (%d)\n",
1882 __func__, rc);
1883 rc = 0;
1884 break;
1885 }
1886 }
1887 out:
1888 return rc;
1889 }
1890
1891 /**
1892 * cxlflash_ioctl() - IOCTL handler for driver
1893 * @sdev: SCSI device associated with LUN.
1894 * @cmd: IOCTL command.
1895 * @arg: Userspace ioctl data structure.
1896 *
1897 * Return: 0 on success, -errno on failure
1898 */
1899 int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
1900 {
1901 typedef int (*sioctl) (struct scsi_device *, void *);
1902
1903 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1904 struct device *dev = &cfg->dev->dev;
1905 struct afu *afu = cfg->afu;
1906 struct dk_cxlflash_hdr *hdr;
1907 char buf[sizeof(union cxlflash_ioctls)];
1908 size_t size = 0;
1909 bool known_ioctl = false;
1910 int idx;
1911 int rc = 0;
1912 struct Scsi_Host *shost = sdev->host;
1913 sioctl do_ioctl = NULL;
1914
1915 static const struct {
1916 size_t size;
1917 sioctl ioctl;
1918 } ioctl_tbl[] = { /* NOTE: order matters here */
1919 {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
1920 {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
1921 {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
1922 {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
1923 {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
1924 {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
1925 {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
1926 };
1927
1928 /* Restrict command set to physical support only for internal LUN */
1929 if (afu->internal_lun)
1930 switch (cmd) {
1931 case DK_CXLFLASH_RELEASE:
1932 dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
1933 __func__, decode_ioctl(cmd), afu->internal_lun);
1934 rc = -EINVAL;
1935 goto cxlflash_ioctl_exit;
1936 }
1937
1938 switch (cmd) {
1939 case DK_CXLFLASH_ATTACH:
1940 case DK_CXLFLASH_USER_DIRECT:
1941 case DK_CXLFLASH_RELEASE:
1942 case DK_CXLFLASH_DETACH:
1943 case DK_CXLFLASH_VERIFY:
1944 case DK_CXLFLASH_RECOVER_AFU:
1945 dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
1946 __func__, decode_ioctl(cmd), cmd, shost->host_no,
1947 sdev->channel, sdev->id, sdev->lun);
1948 rc = ioctl_common(sdev, cmd);
1949 if (unlikely(rc))
1950 goto cxlflash_ioctl_exit;
1951
1952 /* fall through */
1953
1954 case DK_CXLFLASH_MANAGE_LUN:
1955 known_ioctl = true;
1956 idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
1957 size = ioctl_tbl[idx].size;
1958 do_ioctl = ioctl_tbl[idx].ioctl;
1959
1960 if (likely(do_ioctl))
1961 break;
1962
1963 /* fall through */
1964 default:
1965 rc = -EINVAL;
1966 goto cxlflash_ioctl_exit;
1967 }
1968
1969 if (unlikely(copy_from_user(&buf, arg, size))) {
1970 dev_err(dev, "%s: copy_from_user() fail! "
1971 "size=%lu cmd=%d (%s) arg=%p\n",
1972 __func__, size, cmd, decode_ioctl(cmd), arg);
1973 rc = -EFAULT;
1974 goto cxlflash_ioctl_exit;
1975 }
1976
1977 hdr = (struct dk_cxlflash_hdr *)&buf;
1978 if (hdr->version != DK_CXLFLASH_VERSION_0) {
1979 dev_dbg(dev, "%s: Version %u not supported for %s\n",
1980 __func__, hdr->version, decode_ioctl(cmd));
1981 rc = -EINVAL;
1982 goto cxlflash_ioctl_exit;
1983 }
1984
1985 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
1986 dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
1987 rc = -EINVAL;
1988 goto cxlflash_ioctl_exit;
1989 }
1990
1991 rc = do_ioctl(sdev, (void *)&buf);
1992 if (likely(!rc))
1993 if (unlikely(copy_to_user(arg, &buf, size))) {
1994 dev_err(dev, "%s: copy_to_user() fail! "
1995 "size=%lu cmd=%d (%s) arg=%p\n",
1996 __func__, size, cmd, decode_ioctl(cmd), arg);
1997 rc = -EFAULT;
1998 }
1999
2000 /* fall through to exit */
2001
2002 cxlflash_ioctl_exit:
2003 if (unlikely(rc && known_ioctl))
2004 dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2005 "returned rc %d\n", __func__,
2006 decode_ioctl(cmd), cmd, shost->host_no,
2007 sdev->channel, sdev->id, sdev->lun, rc);
2008 else
2009 dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2010 "returned rc %d\n", __func__, decode_ioctl(cmd),
2011 cmd, shost->host_no, sdev->channel, sdev->id,
2012 sdev->lun, rc);
2013 return rc;
2014 }
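/*
 * The table lookup above is why "order matters": the DK_CXLFLASH_* commands
 * are defined with consecutive command numbers, so, for example,
 *
 *	_IOC_NR(DK_CXLFLASH_VERIFY) - _IOC_NR(DK_CXLFLASH_ATTACH) == 4
 *
 * selects the dk_cxlflash_verify row (the fifth entry) of ioctl_tbl[].
 */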