/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear-down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		found = false;

		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = current->tgid, ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = current->parent->tgid;

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
			if (rc)
				goto out;

			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
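
/*
 * Typical pairing, as used throughout the ioctl paths below (sketch):
 *
 *	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *	if (unlikely(!ctxi))
 *		return -EINVAL;
 *	... operate on the context, which is returned locked ...
 *	put_context(ctxi);
 */
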
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	u64 val;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llX\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;

			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=0x%x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}
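
/*
 * For reference, per the SBC READ CAPACITY(16) parameter data consumed
 * above: bytes 0-7 of cmd_buf carry the big-endian address of the last
 * logical block and bytes 8-11 the big-endian logical block length,
 * which is why a __be64 is pulled from offset 0 and a __be32 from
 * offset 8.
 */
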
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		pr_debug("%s: Context does not have allocated RHT!\n",
			 __func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		pr_debug("%s: Bad resource handle LUN! (%d)\n",
			 __func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		pr_debug("%s: Unopened resource handle! (%d)\n",
			 __func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
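
/*
 * The sequence above amounts to a publish protocol: the format bit and
 * LUN id are made visible first, and only then is the dword holding the
 * valid bit stored, in a single write, so the AFU can never observe a
 * valid entry with partially-initialized fields.
 */
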
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}
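
/*
 * cxlflash_lun_attach()/cxlflash_lun_detach() are used as a strict pair;
 * a minimal sketch of the pattern followed by the open paths below:
 *
 *	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *	if (unlikely(rc))
 *		goto out;
 *	...
 * err:
 *	cxlflash_lun_detach(gli);
 *	goto out;
 */
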
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context! (%llu)\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active)
			cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}
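
/*
 * Note the convention shared by the _cxlflash_*() helpers in this file:
 * a NULL @ctxi asks the helper to look up (and later put) the context on
 * its own, while a non-NULL @ctxi means the caller already holds the
 * locked context reference.
 */
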
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * Note that the rht_lun and rht_needs_ws tables were allocated separately
 * when the context was created and are therefore freed here along with it.
 * Also note that we conditionally check for the existence of the context
 * control map before clearing the RHT registers and context capabilities
 * because it is possible to destroy a context while the context is in the
 * error state (previous mapping was removed [so we don't have to worry
 * about clearing] and context is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	WARN_ON(!list_empty(&ctxi->luns));

	/* Clear RHT registers and drop all capabilities for this context */
	if (afu->afu_map && ctxi->ctrl_map) {
		writeq_be(0, &ctxi->ctrl_map->rht_start);
		writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
		writeq_be(0, &ctxi->ctrl_map->ctx_cap);
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
	atomic_dec_if_positive(&cfg->num_user_contexts);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context reference.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @adap_fd:	Previously obtained adapter fd associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 *
 * The context's mutex is locked when an allocated context is returned.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
				       struct cxl_context *ctx, int ctxid,
				       int adap_fd, struct file *file,
				       u32 perms)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context!\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
	ctxi->rht_perms = perms;

	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->lfd = adap_fd;
	ctxi->pid = current->tgid; /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->file = file;
	mutex_init(&ctxi->mutex);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */

	atomic_inc(&cfg->num_user_contexts);
	mutex_lock(&ctxi->mutex);
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	int lfd;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context! (%llu)\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/* Tear down context following last LUN cleanup */
	if (list_empty(&ctxi->luns)) {
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		mutex_lock(&cfg->ctx_tbl_list_mutex);
		mutex_lock(&ctxi->mutex);

		/* Might not have been in error list so conditionally remove */
		if (!list_empty(&ctxi->list))
			list_del(&ctxi->list);
		cfg->ctx_tbl[ctxid] = NULL;
		mutex_unlock(&cfg->ctx_tbl_list_mutex);
		mutex_unlock(&ctxi->mutex);

		lfd = ctxi->lfd;
		destroy_context(cfg, ctxi);
		ctxi = NULL;
		put_ctx = false;

		/*
		 * As a last step, clean up external resources when not
		 * already on an external cleanup thread, i.e.: close(adap_fd).
		 *
		 * NOTE: this will free up the context from the CXL services,
		 * allowing it to dole out the same context_id on a future
		 * (or even currently in-flight) disk_attach operation.
		 */
		if (lfd != -1)
			sys_close(lfd);
	}

	/* Release the sdev reference that bound this LUN to the context */
	scsi_device_put(sdev);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close is performed on the adapter file descriptor returned
 * to the user. Programmatically, the user is not required to perform
 * the close, as it is handled internally via the detach ioctl when
 * a context is being removed. Note that nothing prevents the user
 * from performing a close, but the user should be aware that doing
 * so is considered catastrophic and subsequent usage of the superpipe
 * API with previously saved off tokens will fail.
 *
 * When initiated from an external close (either by the user or via
 * a process tear down), the routine derives the context reference
 * and calls detach for each LUN associated with the context. The
 * final detach operation will cause the context itself to be freed.
 * Note that the saved off lfd is reset prior to calling detach to
 * signify that the final detach should not perform a close.
 *
 * When initiated from a detach operation as part of the tear down
 * of a context, the context is first completely freed and then the
 * close is performed. This routine will fail to derive the context
 * reference (due to the context having already been freed) and then
 * call into the CXL release entry point.
 *
 * Thus, with exception to when the CXL process element (context id)
 * lookup fails (a case that should theoretically never occur), every
 * call into this routine results in a complete freeing of a context.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: Context %d already free!\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns context %d!\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close(%d) for context %d\n",
		__func__, ctxi->lfd, ctxid);

	/* Reset the file descriptor to indicate we're on a close() thread */
	ctxi->lfd = -1;
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
	cxl_fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * services).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(void)
{
	struct page *err_page = global.err_page;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			pr_err("%s: Unable to allocate err_page!\n", __func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	pr_debug("%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}
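
/*
 * The unlocked read followed by the locked re-check above is the classic
 * check/alloc/check-again pattern: losing the race costs at most one
 * page allocation, which is immediately freed in favor of the winner's
 * page.
 */
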
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:	VM area associated with mapping.
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault(%d) for context %d\n",
		__func__, ctxi->lfd, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page!\n",
			__func__);

		err_page = get_err_page();
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not obtain error page!\n",
				__func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap(%d) for context %d\n",
		__func__, ctxi->lfd, ctxid);

	rc = cxl_fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

/*
 * Local fops for adapter file descriptor
 */
static const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}

static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
static int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
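
/*
 * The semaphore dance performed by check_state() is mirrored by every
 * path that must sleep mid-ioctl; a minimal sketch of the contract
 * described in the prolog above:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	...
 *	rc = check_state(cfg);	(may drop and reacquire the rwsem)
 *	...
 *	up_read(&cfg->ioctl_rwsem);
 */
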
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct cxl_ioctl_start_work *work;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 rctxid = 0UL;
	struct file *file;

	struct cxl_context *ctx;

	int fd = -1;

	/* On first attach set fileops */
	if (atomic_read(&cfg->num_user_contexts) == 0)
		cfg->cxl_fops = cxlflash_cxl_fops;

	if (attach->num_interrupts > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, attach->num_interrupts);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device! (%d)\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context! (%016llX)\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached!\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
		rc = -ENOMEM;
		goto err0;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
			__func__, rctxid);
		list_add(&lun_access->list, &ctxi->luns);
		fd = ctxi->lfd;
		goto out_attach;
	}

	ctx = cxl_dev_context_init(cfg->dev);
	if (unlikely(IS_ERR_OR_NULL(ctx))) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err1;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context! (%d)\n",
			__func__, ctxid);
		goto err3;
	}

	work = &ctxi->work;
	work->num_interrupts = attach->num_interrupts;
	work->flags = CXL_START_WORK_NUM_IRQS;

	rc = cxl_start_work(ctx, work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err4;
	}

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err5;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	attach->hdr.return_flags = 0;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

	attach->adap_fd = fd;

out:
	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err5:
	cxl_stop_context(ctx);
err4:
	put_context(ctxi);
	destroy_context(cfg, ctxi);
	ctxi = NULL;
err3:
	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	file->f_op = &null_fops;
	fput(file);
	put_unused_fd(fd);
	fd = -1;
err2:
	cxl_release_context(ctx);
err1:
	kfree(lun_access);
err0:
	scsi_device_put(sdev);
	goto out;
}
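
/*
 * A sketch of the reuse path described in the prolog above, from the
 * user's side (illustrative only; struct and flag names are from
 * cxlflash_ioctl.h, and 'disk_fd' is a hypothetical open of the LUN's
 * special file). A second LUN joins an existing context by carrying the
 * context_id returned from the first attach forward:
 *
 *	attach2.hdr.flags |= DK_CXLFLASH_ATTACH_REUSE_CONTEXT;
 *	attach2.context_id = attach1.context_id;
 *	rc = ioctl(disk_fd, DK_CXLFLASH_ATTACH, &attach2);
 */
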
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int old_fd, fd = -1;
	int ctxid = -1;
	struct file *file;
	struct cxl_context *ctx;
	struct afu *afu = cfg->afu;

	ctx = cxl_dev_context_init(cfg->dev);
	if (unlikely(IS_ERR_OR_NULL(ctx))) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
		rc = -EPERM;
		goto err1;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err1;
	}

	rc = cxl_start_work(ctx, &ctxi->work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	old_fd = ctxi->lfd;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->lfd = fd;
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

	/* Release the original adapter fd and associated CXL resources */
	sys_close(old_fd);
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	cxl_stop_context(ctx);
err2:
	fput(file);
	put_unused_fd(fd);
err1:
	cxl_release_context(ctx);
	goto out;
}

/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	int lretry = 20; /* up to 2 seconds */
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	rc = mutex_lock_interruptible(mutex);
	if (rc)
		goto out;

	dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again!\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc)
					goto out;
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = ctxi->lfd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		recover->hdr.return_flags |=
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&afu->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
		"flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
		__func__, rc, verify->last_lba);
	return rc;
}

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORT(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
			__func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: too many opens for this context\n", __func__);
		rc = -EMFILE;	/* too many opens  */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err1:
	cxlflash_lun_detach(gli);
	goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override! (%d)\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};
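
	/*
	 * ioctl_tbl is indexed by command number relative to
	 * DK_CXLFLASH_ATTACH (see the _IOC_NR() arithmetic below), so its
	 * entries must remain in the exact order the ioctls are defined in
	 * cxlflash_ioctl.h - hence the 'order matters' note above.
	 */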

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail! "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail! "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);

	return rc;
}