Merge branch 7.0/scsi-fixes into 7.1/scsi-staging

Pull in fixes to resolve mpi3mr merge conflict.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Martin K. Petersen
2026-04-02 20:31:22 -04:00
20 changed files with 170 additions and 80 deletions

View File

@@ -6213,6 +6213,7 @@ F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
M: Karan Tilak Kumar <kartilak@cisco.com>
M: Narsimhulu Musini <nmusini@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported

View File

@@ -2578,7 +2578,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_channel = 0;
shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = HISI_SAS_MAX_COMMANDS;

View File

@@ -4993,7 +4993,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_channel = 0;
shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

View File

@@ -4966,7 +4966,8 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
vhost->num_targets = be32_to_cpu(rsp->num_written);
vhost->num_targets = min_t(u32, be32_to_cpu(rsp->num_written),
max_targets);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
break;
case IBMVFC_MAD_FAILED:

View File

@@ -12023,6 +12023,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
iounmap(phba->sli4_hba.conf_regs_memmap_p);
if (phba->sli4_hba.dpp_regs_memmap_p)
iounmap(phba->sli4_hba.dpp_regs_memmap_p);
if (phba->sli4_hba.dpp_regs_memmap_wc_p)
iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
break;

View File

@@ -15970,6 +15970,32 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
return NULL;
}
static __maybe_unused void __iomem *
lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
{
/* DPP region is supposed to cover 64-bit BAR2 */
if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
"3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
dpp_barset);
return NULL;
}
if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
void __iomem *dpp_map;
dpp_map = ioremap_wc(phba->pci_bar2_map,
pci_resource_len(phba->pcidev,
PCI_64BIT_BAR4));
if (dpp_map)
phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
}
return phba->sli4_hba.dpp_regs_memmap_wc_p;
}
/**
* lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
* @phba: HBA structure that EQs are on.
@@ -16933,9 +16959,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint8_t dpp_barset;
uint32_t dpp_offset;
uint8_t wq_create_version;
#ifdef CONFIG_X86
unsigned long pg_addr;
#endif
/* sanity check on queue memory */
if (!wq || !cq)
@@ -17121,14 +17144,15 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
#ifdef CONFIG_X86
/* Enable combined writes for DPP aperture */
pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
rc = set_memory_wc(pg_addr, 1);
if (rc) {
bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
if (!bar_memmap_p) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3272 Cannot setup Combined "
"Write on WQ[%d] - disable DPP\n",
wq->queue_id);
phba->cfg_enable_dpp = 0;
} else {
wq->dpp_regaddr = bar_memmap_p + dpp_offset;
}
#else
phba->cfg_enable_dpp = 0;

View File

@@ -788,6 +788,9 @@ struct lpfc_sli4_hba {
void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
* dpp registers
*/
void __iomem *dpp_regs_memmap_wc_p;/* Kernel memory mapped address for
* dpp registers with write combining
*/
union {
struct {
/* IF Type 0, BAR 0 PCI cfg space reg mem map */

View File

@@ -1619,6 +1619,7 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_info(mrioc,
"successfully transitioned to %s state\n",
mpi3mr_iocstate_name(ioc_state));
mpi3mr_clear_reset_history(mrioc);
return 0;
}
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
@@ -1638,6 +1639,15 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
} while (elapsed_time_sec < mrioc->ready_timeout);
ioc_state = mpi3mr_get_iocstate(mrioc);
if (ioc_state == MRIOC_STATE_READY) {
ioc_info(mrioc,
"successfully transitioned to %s state after %llu seconds\n",
mpi3mr_iocstate_name(ioc_state), elapsed_time_sec);
mpi3mr_clear_reset_history(mrioc);
return 0;
}
out_failed:
elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
@@ -4828,22 +4838,26 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
}
for (i = 0; i < mrioc->num_queues; i++) {
mrioc->op_reply_qinfo[i].qid = 0;
mrioc->op_reply_qinfo[i].ci = 0;
mrioc->op_reply_qinfo[i].num_replies = 0;
mrioc->op_reply_qinfo[i].ephase = 0;
atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
mpi3mr_memset_op_reply_q_buffers(mrioc, i);
if (mrioc->op_reply_qinfo) {
mrioc->op_reply_qinfo[i].qid = 0;
mrioc->op_reply_qinfo[i].ci = 0;
mrioc->op_reply_qinfo[i].num_replies = 0;
mrioc->op_reply_qinfo[i].ephase = 0;
atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
mpi3mr_memset_op_reply_q_buffers(mrioc, i);
}
mrioc->req_qinfo[i].ci = 0;
mrioc->req_qinfo[i].pi = 0;
mrioc->req_qinfo[i].num_requests = 0;
mrioc->req_qinfo[i].qid = 0;
mrioc->req_qinfo[i].reply_qid = 0;
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mrioc->req_qinfo[i].last_full_host_tag = 0;
mpi3mr_memset_op_req_q_buffers(mrioc, i);
if (mrioc->req_qinfo) {
mrioc->req_qinfo[i].ci = 0;
mrioc->req_qinfo[i].pi = 0;
mrioc->req_qinfo[i].num_requests = 0;
mrioc->req_qinfo[i].qid = 0;
mrioc->req_qinfo[i].reply_qid = 0;
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mrioc->req_qinfo[i].last_full_host_tag = 0;
mpi3mr_memset_op_req_q_buffers(mrioc, i);
}
}
atomic_set(&mrioc->pend_large_data_sz, 0);

View File

@@ -525,8 +525,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
} else {
task->task_done(task);
}
rc = -ENODEV;
goto err_out;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device gone\n");
return 0;
}
ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);

View File

@@ -2751,7 +2751,6 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_logo.els_logo_pyld) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -2776,7 +2775,6 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (rval != QLA_SUCCESS) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}

View File

@@ -190,7 +190,7 @@ static struct {
{"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN | BLIST_SKIP_IO_HINTS},
{"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
{"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
{"INSITE", "I325VM", NULL, BLIST_KEY},

View File

@@ -360,11 +360,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
}
if (scsi_realloc_sdev_budget_map(sdev, depth))
goto out_device_destroy;
scsi_change_queue_depth(sdev, depth);

View File

@@ -1734,7 +1734,7 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
break;
default:
if (channel < shost->max_channel) {
if (channel <= shost->max_channel) {
res = scsi_scan_host_selected(shost, channel, id, lun,
SCSI_SCAN_MANUAL);
} else {

View File

@@ -215,7 +215,7 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
unsigned char *type_ptr = ses_dev->page1_types;
unsigned char *desc_ptr = ses_dev->page2 + 8;
if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0)
if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len))
return NULL;
for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
@@ -528,9 +528,8 @@ struct efd {
};
static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
void *data)
struct efd *efd)
{
struct efd *efd = data;
int i;
struct ses_component *scomp;
@@ -683,7 +682,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
if (efd.addr) {
efd.dev = &sdev->sdev_gendev;
enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
ses_enclosure_find_by_addr(edev, &efd);
}
}

View File

@@ -42,8 +42,6 @@ struct vnic_dev {
struct vnic_devcmd_notify *notify;
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
u32 *linkstatus;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
@@ -650,8 +648,6 @@ int svnic_dev_init(struct vnic_dev *vdev, int arg)
int svnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
return *vdev->linkstatus;
if (!vnic_dev_notify_ready(vdev))
return 0;
@@ -686,11 +682,6 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),

View File

@@ -1856,8 +1856,9 @@ static enum scsi_qc_status storvsc_queuecommand(struct Scsi_Host *host,
cmd_request->payload_sz = payload_sz;
/* Invokes the vsc to start an IO */
ret = storvsc_do_io(dev, cmd_request, get_cpu());
put_cpu();
migrate_disable();
ret = storvsc_do_io(dev, cmd_request, smp_processor_id());
migrate_enable();
if (ret)
scsi_dma_unmap(scmnd);

View File

@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
@@ -269,15 +270,27 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static bool tcm_loop_flush_work_iter(struct request *rq, void *data)
{
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(rq);
struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
flush_work(&se_cmd->work);
return true;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct Scsi_Host *sh = sc->device->host;
int ret;
/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_hba = *(struct tcm_loop_hba **)shost_priv(sh);
if (!tl_hba) {
pr_err("Unable to perform device reset without active I_T Nexus\n");
return FAILED;
@@ -286,11 +299,38 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
if (tl_tpg) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return SUCCESS;
}
return FAILED;
if (!tl_tpg)
return FAILED;
/*
* Issue a LUN_RESET to drain all commands that the target core
* knows about. This handles commands not yet marked CMD_T_COMPLETE.
*/
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET);
if (ret != TMR_FUNCTION_COMPLETE)
return FAILED;
/*
* Flush any deferred target core completion work that may still be
* queued. Commands that already had CMD_T_COMPLETE set before the TMR
* are skipped by the TMR drain, but their async completion work
* (transport_lun_remove_cmd percpu_ref_put, release_cmd scsi_done)
* may still be pending in target_completion_wq.
*
* The SCSI EH will reuse in-flight scsi_cmnd structures for recovery
commands (e.g. TUR) immediately after this handler returns SUCCESS.
If deferred work is still pending, the memset in queuecommand would
* zero the se_cmd while the work accesses it, leaking the LUN
* percpu_ref and hanging configfs unlink forever.
*
* Use blk_mq_tagset_busy_iter() to find all started requests and
flush_work() on each — the same pattern used by mpi3mr, scsi_debug,
and other SCSI drivers to drain outstanding commands during reset.
*/
blk_mq_tagset_busy_iter(&sh->tag_set, tcm_loop_flush_work_iter, NULL);
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return SUCCESS;
}
static const struct scsi_host_template tcm_loop_driver_template = {

View File

@@ -108,8 +108,8 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
const char *page, size_t count)
{
ssize_t read_bytes;
struct file *fp;
ssize_t r = -EINVAL;
struct path path = {};
mutex_lock(&target_devices_lock);
if (target_devices) {
@@ -131,17 +131,14 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
db_root_stage[read_bytes - 1] = '\0';
/* validate new db root before accepting it */
fp = filp_open(db_root_stage, O_RDONLY, 0);
if (IS_ERR(fp)) {
r = kern_path(db_root_stage, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (r) {
pr_err("db_root: cannot open: %s\n", db_root_stage);
if (r == -ENOTDIR)
pr_err("db_root: not a directory: %s\n", db_root_stage);
goto unlock;
}
if (!S_ISDIR(file_inode(fp)->i_mode)) {
filp_close(fp, NULL);
pr_err("db_root: not a directory: %s\n", db_root_stage);
goto unlock;
}
filp_close(fp, NULL);
path_put(&path);
strscpy(db_root, db_root_stage);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

View File

@@ -276,7 +276,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
ssize_t len = 0;
int ret = 0, i;
aio_cmd = kmalloc_flex(*aio_cmd, bvecs, sgl_nents);
aio_cmd = kzalloc_flex(*aio_cmd, bvecs, sgl_nents);
if (!aio_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

View File

@@ -24,6 +24,7 @@
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/sizes.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
@@ -513,8 +514,8 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, struct scsi_cmnd *cmd,
if (hba->mcq_enabled) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
hwq_id = hwq->id;
if (hwq)
hwq_id = hwq->id;
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
@@ -4433,14 +4434,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
/*
* If the h8 exit fails during the runtime resume process, it becomes
* stuck and cannot be recovered through the error handler. To fix
* this, use link recovery instead of the error handler.
*/
if (ret && hba->pm_op_in_progress)
ret = ufshcd_link_recovery(hba);
return ret;
}
@@ -5372,6 +5365,25 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE];
hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE];
hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE];
if (hba->dev_info.wspecversion <= 0x0220) {
/*
* These older spec chips have only one RPMB region,
* sized between 128 kB minimum and 16 MB maximum.
* No per region size fields are provided (respective
* REGIONX_SIZE fields always contain zeros), so get
it from the logical block count and size fields for
compatibility.
*
* (See JESD220C-2_2 Section 14.1.4.6
* RPMB Unit Descriptor, offset 13h, 4 bytes)
*/
hba->dev_info.rpmb_region_size[0] =
(get_unaligned_be64(desc_buf
+ RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT)
<< desc_buf[RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE])
/ SZ_128K;
}
}
@@ -6089,6 +6101,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -6192,7 +6205,7 @@ static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
* impacted or critical. Handle these device by determining their urgent
* bkops status at runtime.
*/
if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
if ((curr_status > BKOPS_STATUS_NO_OP) && (curr_status < BKOPS_STATUS_PERF_IMPACT)) {
dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
__func__, curr_status);
/* update the current status as the urgent bkops level */
@@ -7234,7 +7247,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba, bool reset_i
ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
if (ret)
outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
outstanding_cqs = (1ULL << hba->nr_hw_queues) - 1;
/* Exclude the poll queues */
nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
@@ -10226,6 +10239,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
flush_work(&hba->eeh_work);
cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
if (ret)
@@ -10280,7 +10294,6 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_link_active;
cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
goto out;
set_link_active:
@@ -10352,7 +10365,15 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
} else {
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
goto vendor_suspend;
/*
* If the h8 exit fails during the runtime resume
* process, it becomes stuck and cannot be recovered
* through the error handler. To fix this, use link
* recovery instead of the error handler.
*/
ret = ufshcd_link_recovery(hba);
if (ret)
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
/*