mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 02:01:18 -04:00
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley: "Usual driver updates (ufs, lpfc, fnic, target, mpi3mr). The substantive core changes are adding a 'serial' sysfs attribute and getting sd to support > PAGE_SIZE sectors" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (98 commits) scsi: target: Don't validate ignored fields in PROUT PREEMPT scsi: qla2xxx: Use nr_cpu_ids instead of NR_CPUS for qp_cpu_map allocation scsi: ufs: core: Disable timestamp for Kioxia THGJFJT0E25BAIP scsi: mpi3mr: Fix typo scsi: sd: fix missing put_disk() when device_add(&disk_dev) fails scsi: libsas: Delete unused to_dom_device() and to_dev_attr() scsi: storvsc: Handle PERSISTENT_RESERVE_IN truncation for Hyper-V vFC scsi: iscsi_tcp: Remove unneeded selections of CRYPTO and CRYPTO_MD5 scsi: lpfc: Update lpfc version to 15.0.0.0 scsi: lpfc: Add PCI ID support for LPe42100 series adapters scsi: lpfc: Introduce 128G link speed selection and support scsi: lpfc: Check ASIC_ID register to aid diagnostics during failed fw updates scsi: lpfc: Update construction of SGL when XPSGL is enabled scsi: lpfc: Remove deprecated PBDE feature scsi: lpfc: Add REG_VFI mailbox cmd error handling scsi: lpfc: Log MCQE contents for mbox commands with no context scsi: lpfc: Select mailbox rq_create cmd version based on SLI4 if_type scsi: lpfc: Break out of IRQ affinity assignment when mask reaches nr_cpu_ids scsi: ufs: core: Make the header files self-contained scsi: ufs: core: Remove an include directive from ufshcd-crypto.h ...
This commit is contained in:
@@ -1768,3 +1768,26 @@ Description:
|
||||
==================== ===========================
|
||||
|
||||
The attribute is read only.
|
||||
|
||||
What: /sys/bus/platform/drivers/ufshcd/*/dme_qos_notification
|
||||
What: /sys/bus/platform/devices/*.ufs/dme_qos_notification
|
||||
Date: March 2026
|
||||
Contact: Can Guo <can.guo@oss.qualcomm.com>
|
||||
Description:
|
||||
This attribute reports and clears pending DME (Device Management
|
||||
Entity) Quality of Service (QoS) notifications. This attribute
|
||||
is a bitfield with the following bit assignments:
|
||||
|
||||
Bit Description
|
||||
=== ======================================
|
||||
0 DME QoS Monitor has been reset by host
|
||||
1 QoS from TX is detected
|
||||
2 QoS from RX is detected
|
||||
3 QoS from PA_INIT is detected
|
||||
|
||||
Reading this attribute returns the pending DME QoS notification
|
||||
bits. Writing '0' to this attribute clears pending DME QoS
|
||||
notification bits. Writing any non-zero value is invalid and
|
||||
will be rejected.
|
||||
|
||||
The attribute is read/write.
|
||||
|
||||
@@ -15,6 +15,7 @@ select:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- qcom,milos-ufshc
|
||||
- qcom,msm8998-ufshc
|
||||
- qcom,qcs8300-ufshc
|
||||
- qcom,sa8775p-ufshc
|
||||
@@ -31,21 +32,28 @@ select:
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- qcom,msm8998-ufshc
|
||||
- qcom,qcs8300-ufshc
|
||||
- qcom,sa8775p-ufshc
|
||||
- qcom,sc7180-ufshc
|
||||
- qcom,sc7280-ufshc
|
||||
- qcom,sc8180x-ufshc
|
||||
- qcom,sc8280xp-ufshc
|
||||
- qcom,sm8250-ufshc
|
||||
- qcom,sm8350-ufshc
|
||||
- qcom,sm8450-ufshc
|
||||
- qcom,sm8550-ufshc
|
||||
- const: qcom,ufshc
|
||||
- const: jedec,ufs-2.0
|
||||
oneOf:
|
||||
- items:
|
||||
- enum:
|
||||
- qcom,x1e80100-ufshc
|
||||
- const: qcom,sm8550-ufshc
|
||||
- const: qcom,ufshc
|
||||
- items:
|
||||
- enum:
|
||||
- qcom,milos-ufshc
|
||||
- qcom,msm8998-ufshc
|
||||
- qcom,qcs8300-ufshc
|
||||
- qcom,sa8775p-ufshc
|
||||
- qcom,sc7180-ufshc
|
||||
- qcom,sc7280-ufshc
|
||||
- qcom,sc8180x-ufshc
|
||||
- qcom,sc8280xp-ufshc
|
||||
- qcom,sm8250-ufshc
|
||||
- qcom,sm8350-ufshc
|
||||
- qcom,sm8450-ufshc
|
||||
- qcom,sm8550-ufshc
|
||||
- const: qcom,ufshc
|
||||
- const: jedec,ufs-2.0
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
@@ -15,6 +15,7 @@ select:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- qcom,eliza-ufshc
|
||||
- qcom,kaanapali-ufshc
|
||||
- qcom,sm8650-ufshc
|
||||
- qcom,sm8750-ufshc
|
||||
@@ -25,6 +26,7 @@ properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- qcom,eliza-ufshc
|
||||
- qcom,kaanapali-ufshc
|
||||
- qcom,sm8650-ufshc
|
||||
- qcom,sm8750-ufshc
|
||||
@@ -66,6 +68,18 @@ required:
|
||||
|
||||
allOf:
|
||||
- $ref: qcom,ufs-common.yaml
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- qcom,eliza-ufshc
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
minItems: 2
|
||||
reg-names:
|
||||
minItems: 2
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ properties:
|
||||
maxItems: 1
|
||||
|
||||
resets:
|
||||
maxItems: 4
|
||||
maxItems: 5
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
@@ -49,6 +49,7 @@ properties:
|
||||
- const: sys
|
||||
- const: ufs
|
||||
- const: grf
|
||||
- const: mphy
|
||||
|
||||
reset-gpios:
|
||||
maxItems: 1
|
||||
@@ -98,8 +99,8 @@ examples:
|
||||
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&power RK3576_PD_USB>;
|
||||
resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>, <&cru SRST_A_UFS>,
|
||||
<&cru SRST_P_UFS_GRF>;
|
||||
reset-names = "biu", "sys", "ufs", "grf";
|
||||
<&cru SRST_P_UFS_GRF>, <&cru SRST_MPHY_INIT>;
|
||||
reset-names = "biu", "sys", "ufs", "grf", "mphy";
|
||||
reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -3925,6 +3925,7 @@ static const struct target_core_fabric_ops srpt_template = {
|
||||
.tfc_wwn_attrs = srpt_wwn_attrs,
|
||||
.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -1632,8 +1632,8 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
|
||||
/*
|
||||
Initialize the Host Adapter Full Model Name from the Model Name.
|
||||
*/
|
||||
strcpy(adapter->full_model, "BusLogic ");
|
||||
strcat(adapter->full_model, adapter->model);
|
||||
scnprintf(adapter->full_model, sizeof(adapter->full_model),
|
||||
"BusLogic %s", adapter->model);
|
||||
/*
|
||||
Select an appropriate value for the Tagged Queue Depth either from a
|
||||
BusLogic Driver Options specification, or based on whether this Host
|
||||
|
||||
@@ -304,8 +304,6 @@ config ISCSI_TCP
|
||||
tristate "iSCSI Initiator over TCP/IP"
|
||||
depends on SCSI && INET
|
||||
select CRC32
|
||||
select CRYPTO
|
||||
select CRYPTO_MD5
|
||||
select SCSI_ISCSI_ATTRS
|
||||
help
|
||||
The iSCSI Driver provides a host with the ability to access storage
|
||||
@@ -1151,6 +1149,7 @@ config SCSI_LPFC
|
||||
depends on NVME_TARGET_FC || NVME_TARGET_FC=n
|
||||
depends on NVME_FC || NVME_FC=n
|
||||
select CRC_T10DIF
|
||||
select CRC32
|
||||
select IRQ_POLL
|
||||
help
|
||||
This lpfc driver supports the Emulex LightPulse
|
||||
|
||||
@@ -82,7 +82,7 @@ extern int src_mode;
|
||||
extern int dst_mode;
|
||||
struct symbol;
|
||||
|
||||
void stop(const char *errstring, int err_code);
|
||||
void __attribute__((noreturn)) stop(const char *errstring, int err_code);
|
||||
void include_file(char *file_name, include_type type);
|
||||
void expand_macro(struct symbol *macro_symbol);
|
||||
struct instruction *seq_alloc(void);
|
||||
|
||||
@@ -1104,7 +1104,7 @@ conditional:
|
||||
last_scope = TAILQ_LAST(&scope_context->inner_scope,
|
||||
scope_tailq);
|
||||
if (last_scope == NULL
|
||||
|| last_scope->type == T_ELSE) {
|
||||
|| last_scope->type == (int)T_ELSE) {
|
||||
|
||||
stop("'else if' without leading 'if'", EX_DATAERR);
|
||||
/* NOTREACHED */
|
||||
|
||||
@@ -389,7 +389,7 @@ nop { return T_NOP; }
|
||||
char c;
|
||||
|
||||
yptr = yytext;
|
||||
while (c = *yptr++) {
|
||||
while ((c = *yptr++)) {
|
||||
/*
|
||||
* Strip carriage returns.
|
||||
*/
|
||||
|
||||
@@ -1612,6 +1612,7 @@ static const struct target_core_fabric_ops efct_lio_ops = {
|
||||
.sess_get_initiator_sid = NULL,
|
||||
.tfc_tpg_base_attrs = efct_lio_tpg_attrs,
|
||||
.tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
@@ -1650,6 +1651,7 @@ static const struct target_core_fabric_ops efct_lio_npiv_ops = {
|
||||
.tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
|
||||
.tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -101,11 +101,6 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
|
||||
}
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#ifndef __clang__
|
||||
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* the master logging function. this function will format the message as
|
||||
* outlined by the formatting string, the input device information and the
|
||||
@@ -118,10 +113,9 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
|
||||
*
|
||||
* @return 0 on success, or -1 if an error occurred.
|
||||
*/
|
||||
static int esas2r_log_master(const long level,
|
||||
const struct device *dev,
|
||||
const char *format,
|
||||
va_list args)
|
||||
static __printf(3, 0)
|
||||
int esas2r_log_master(const long level, const struct device *dev,
|
||||
const char *format, va_list args)
|
||||
{
|
||||
if (level <= event_log_level) {
|
||||
unsigned long flags = 0;
|
||||
@@ -175,8 +169,6 @@ static int esas2r_log_master(const long level,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
||||
/*
|
||||
* formats and logs a message to the system log.
|
||||
*
|
||||
|
||||
@@ -4613,7 +4613,7 @@ void fnic_fdls_disc_start(struct fnic_iport_s *iport)
|
||||
if (!iport->usefip) {
|
||||
if (iport->flags & FNIC_FIRST_LINK_UP) {
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
fnic_scsi_fcpio_reset(iport->fnic);
|
||||
fnic_fcpio_reset(iport->fnic);
|
||||
spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
|
||||
|
||||
iport->flags &= ~FNIC_FIRST_LINK_UP;
|
||||
@@ -5072,7 +5072,7 @@ void fnic_fdls_link_down(struct fnic_iport_s *iport)
|
||||
iport->fabric.flags = 0;
|
||||
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
fnic_scsi_fcpio_reset(iport->fnic);
|
||||
fnic_fcpio_reset(iport->fnic);
|
||||
spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
|
||||
list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
|
||||
@@ -737,7 +737,7 @@ void fnic_work_on_fip_timer(struct work_struct *work)
|
||||
if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) {
|
||||
|
||||
if (iport->flags & FNIC_FIRST_LINK_UP) {
|
||||
fnic_scsi_fcpio_reset(iport->fnic);
|
||||
fnic_fcpio_reset(iport->fnic);
|
||||
iport->flags &= ~FNIC_FIRST_LINK_UP;
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.8.0.2"
|
||||
#define DRV_VERSION "1.8.0.3"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
@@ -438,6 +438,7 @@ struct fnic {
|
||||
struct list_head tx_queue;
|
||||
mempool_t *frame_pool;
|
||||
mempool_t *frame_elem_pool;
|
||||
mempool_t *frame_recv_pool;
|
||||
struct work_struct tport_work;
|
||||
struct list_head tport_event_list;
|
||||
|
||||
@@ -512,7 +513,6 @@ int fnic_host_reset(struct Scsi_Host *shost);
|
||||
void fnic_reset(struct Scsi_Host *shost);
|
||||
int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
|
||||
void fnic_get_host_port_state(struct Scsi_Host *shost);
|
||||
void fnic_scsi_fcpio_reset(struct fnic *fnic);
|
||||
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
|
||||
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
|
||||
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
|
||||
@@ -541,7 +541,8 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
|
||||
}
|
||||
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
|
||||
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
|
||||
void fnic_free_txq(struct list_head *head);
|
||||
void fnic_free_txq(struct fnic *fnic);
|
||||
void fnic_free_rxq(struct fnic *fnic);
|
||||
int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
|
||||
char **subsys_desc);
|
||||
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
|
||||
|
||||
@@ -291,7 +291,7 @@ void fnic_handle_frame(struct work_struct *work)
|
||||
if (fnic->stop_rx_link_events) {
|
||||
list_del(&cur_frame->links);
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
kfree(cur_frame->fp);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
return;
|
||||
}
|
||||
@@ -317,7 +317,7 @@ void fnic_handle_frame(struct work_struct *work)
|
||||
fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
|
||||
cur_frame->frame_len, fchdr_offset);
|
||||
|
||||
kfree(cur_frame->fp);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
}
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
@@ -337,8 +337,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
|
||||
if (fnic->stop_rx_link_events) {
|
||||
list_del(&cur_frame->links);
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
kfree(cur_frame->fp);
|
||||
kfree(cur_frame);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -355,8 +355,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
|
||||
list_del(&cur_frame->links);
|
||||
|
||||
if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
|
||||
kfree(cur_frame->fp);
|
||||
kfree(cur_frame);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
|
||||
@@ -375,10 +375,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
|
||||
|
||||
eh = (struct ethhdr *) fp;
|
||||
if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
|
||||
fip_fr_elem = (struct fnic_frame_list *)
|
||||
kzalloc_obj(struct fnic_frame_list, GFP_ATOMIC);
|
||||
fip_fr_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
|
||||
if (!fip_fr_elem)
|
||||
return 0;
|
||||
memset(fip_fr_elem, 0, sizeof(struct fnic_frame_list));
|
||||
fip_fr_elem->fp = fp;
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
|
||||
@@ -519,13 +519,13 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
|
||||
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
|
||||
frame_elem = mempool_alloc(fnic->frame_elem_pool,
|
||||
GFP_ATOMIC | __GFP_ZERO);
|
||||
frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
|
||||
if (!frame_elem) {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Failed to allocate memory for frame elem");
|
||||
goto drop;
|
||||
}
|
||||
memset(frame_elem, 0, sizeof(struct fnic_frame_list));
|
||||
frame_elem->fp = fp;
|
||||
frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
|
||||
frame_elem->frame_len = bytes_written;
|
||||
@@ -538,7 +538,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
|
||||
return;
|
||||
|
||||
drop:
|
||||
kfree(fp);
|
||||
mempool_free(fp, fnic->frame_recv_pool);
|
||||
}
|
||||
|
||||
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
|
||||
@@ -591,7 +591,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
|
||||
int ret;
|
||||
|
||||
len = FNIC_FRAME_HT_ROOM;
|
||||
buf = kmalloc(len, GFP_ATOMIC);
|
||||
buf = mempool_alloc(fnic->frame_recv_pool, GFP_ATOMIC);
|
||||
if (!buf) {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Unable to allocate RQ buffer of size: %d\n", len);
|
||||
@@ -609,7 +609,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
|
||||
fnic_queue_rq_desc(rq, buf, pa, len);
|
||||
return 0;
|
||||
free_buf:
|
||||
kfree(buf);
|
||||
mempool_free(buf, fnic->frame_recv_pool);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -621,7 +621,7 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
|
||||
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
kfree(rq_buf);
|
||||
mempool_free(rq_buf, fnic->frame_recv_pool);
|
||||
buf->os_buf = NULL;
|
||||
}
|
||||
|
||||
@@ -704,13 +704,13 @@ fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
|
||||
*/
|
||||
if ((fnic->state != FNIC_IN_FC_MODE)
|
||||
&& (fnic->state != FNIC_IN_ETH_MODE)) {
|
||||
frame_elem = mempool_alloc(fnic->frame_elem_pool,
|
||||
GFP_ATOMIC | __GFP_ZERO);
|
||||
frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
|
||||
if (!frame_elem) {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Failed to allocate memory for frame elem");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(frame_elem, 0, sizeof(struct fnic_frame_list));
|
||||
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
|
||||
@@ -836,14 +836,34 @@ fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void fnic_free_txq(struct list_head *head)
|
||||
void fnic_free_txq(struct fnic *fnic)
|
||||
{
|
||||
struct fnic_frame_list *cur_frame, *next;
|
||||
|
||||
list_for_each_entry_safe(cur_frame, next, head, links) {
|
||||
list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
|
||||
list_del(&cur_frame->links);
|
||||
kfree(cur_frame->fp);
|
||||
kfree(cur_frame);
|
||||
mempool_free(cur_frame->fp, fnic->frame_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
}
|
||||
}
|
||||
|
||||
void fnic_free_rxq(struct fnic *fnic)
|
||||
{
|
||||
struct fnic_frame_list *cur_frame, *next;
|
||||
|
||||
list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
|
||||
list_del(&cur_frame->links);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
}
|
||||
|
||||
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
|
||||
list_for_each_entry_safe(cur_frame, next,
|
||||
&fnic->fip_frame_queue, links) {
|
||||
list_del(&cur_frame->links);
|
||||
mempool_free(cur_frame->fp, fnic->frame_recv_pool);
|
||||
mempool_free(cur_frame, fnic->frame_elem_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -898,7 +918,7 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
|
||||
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
kfree(buf->os_buf);
|
||||
mempool_free(buf->os_buf, fnic->frame_pool);
|
||||
buf->os_buf = NULL;
|
||||
}
|
||||
|
||||
@@ -1108,3 +1128,53 @@ void fnic_reset_work_handler(struct work_struct *work)
|
||||
spin_unlock_irqrestore(&reset_fnic_list_lock,
|
||||
reset_fnic_list_lock_flags);
|
||||
}
|
||||
|
||||
void fnic_fcpio_reset(struct fnic *fnic)
|
||||
{
|
||||
unsigned long flags;
|
||||
enum fnic_state old_state;
|
||||
struct fnic_iport_s *iport = &fnic->iport;
|
||||
DECLARE_COMPLETION_ONSTACK(fw_reset_done);
|
||||
int time_remain;
|
||||
|
||||
/* issue fw reset */
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
|
||||
/* fw reset is in progress, poll for its completion */
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"fnic is in unexpected state: %d for fw_reset\n",
|
||||
fnic->state);
|
||||
return;
|
||||
}
|
||||
|
||||
old_state = fnic->state;
|
||||
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
|
||||
|
||||
fnic_update_mac_locked(fnic, iport->hwmac);
|
||||
fnic->fw_reset_done = &fw_reset_done;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Issuing fw reset\n");
|
||||
if (fnic_fw_reset_handler(fnic)) {
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
|
||||
fnic->state = old_state;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
} else {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Waiting for fw completion\n");
|
||||
time_remain = wait_for_completion_timeout(&fw_reset_done,
|
||||
msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Woken up after fw completion timeout\n");
|
||||
if (time_remain == 0) {
|
||||
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"FW reset completion timed out after %d ms\n",
|
||||
FNIC_FW_RESET_TIMEOUT);
|
||||
}
|
||||
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
|
||||
}
|
||||
fnic->fw_reset_done = NULL;
|
||||
}
|
||||
|
||||
@@ -410,6 +410,7 @@ void fnic_fdls_add_tport(struct fnic_iport_s *iport,
|
||||
void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport,
|
||||
unsigned long flags);
|
||||
void fnic_fcpio_reset(struct fnic *fnic);
|
||||
|
||||
/* fip.c */
|
||||
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
|
||||
@@ -422,7 +423,6 @@ void fnic_handle_fip_timer(struct timer_list *t);
|
||||
extern void fdls_fabric_timer_callback(struct timer_list *t);
|
||||
|
||||
/* fnic_scsi.c */
|
||||
void fnic_scsi_fcpio_reset(struct fnic *fnic);
|
||||
extern void fdls_fabric_timer_callback(struct timer_list *t);
|
||||
void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
|
||||
int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
|
||||
|
||||
@@ -40,6 +40,7 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
|
||||
static struct kmem_cache *fnic_io_req_cache;
|
||||
static struct kmem_cache *fdls_frame_cache;
|
||||
static struct kmem_cache *fdls_frame_elem_cache;
|
||||
static struct kmem_cache *fdls_frame_recv_cache;
|
||||
static LIST_HEAD(fnic_list);
|
||||
static DEFINE_SPINLOCK(fnic_list_lock);
|
||||
static DEFINE_IDA(fnic_ida);
|
||||
@@ -554,6 +555,7 @@ static int fnic_cleanup(struct fnic *fnic)
|
||||
mempool_destroy(fnic->io_req_pool);
|
||||
mempool_destroy(fnic->frame_pool);
|
||||
mempool_destroy(fnic->frame_elem_pool);
|
||||
mempool_destroy(fnic->frame_recv_pool);
|
||||
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
|
||||
mempool_destroy(fnic->io_sgl_pool[i]);
|
||||
|
||||
@@ -928,6 +930,14 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
fnic->frame_elem_pool = pool;
|
||||
|
||||
pool = mempool_create_slab_pool(FDLS_MIN_FRAMES,
|
||||
fdls_frame_recv_cache);
|
||||
if (!pool) {
|
||||
err = -ENOMEM;
|
||||
goto err_out_fdls_frame_recv_pool;
|
||||
}
|
||||
fnic->frame_recv_pool = pool;
|
||||
|
||||
/* setup vlan config, hw inserts vlan header */
|
||||
fnic->vlan_hw_insert = 1;
|
||||
fnic->vlan_id = 0;
|
||||
@@ -1085,6 +1095,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
vnic_dev_notify_unset(fnic->vdev);
|
||||
err_out_fnic_notify_set:
|
||||
mempool_destroy(fnic->frame_recv_pool);
|
||||
err_out_fdls_frame_recv_pool:
|
||||
mempool_destroy(fnic->frame_elem_pool);
|
||||
err_out_fdls_frame_elem_pool:
|
||||
mempool_destroy(fnic->frame_pool);
|
||||
@@ -1157,7 +1169,6 @@ static void fnic_remove(struct pci_dev *pdev)
|
||||
timer_delete_sync(&fnic->enode_ka_timer);
|
||||
timer_delete_sync(&fnic->vn_ka_timer);
|
||||
|
||||
fnic_free_txq(&fnic->fip_frame_queue);
|
||||
fnic_fcoe_reset_vlans(fnic);
|
||||
}
|
||||
|
||||
@@ -1177,8 +1188,8 @@ static void fnic_remove(struct pci_dev *pdev)
|
||||
list_del(&fnic->list);
|
||||
spin_unlock_irqrestore(&fnic_list_lock, flags);
|
||||
|
||||
fnic_free_txq(&fnic->frame_queue);
|
||||
fnic_free_txq(&fnic->tx_queue);
|
||||
fnic_free_rxq(fnic);
|
||||
fnic_free_txq(fnic);
|
||||
|
||||
vnic_dev_notify_unset(fnic->vdev);
|
||||
fnic_free_intr(fnic);
|
||||
@@ -1287,6 +1298,15 @@ static int __init fnic_init_module(void)
|
||||
goto err_create_fdls_frame_cache_elem;
|
||||
}
|
||||
|
||||
fdls_frame_recv_cache = kmem_cache_create("fdls_frame_recv",
|
||||
FNIC_FRAME_HT_ROOM,
|
||||
0, SLAB_HWCACHE_ALIGN, NULL);
|
||||
if (!fdls_frame_recv_cache) {
|
||||
pr_err("fnic fdls frame recv cach create failed\n");
|
||||
err = -ENOMEM;
|
||||
goto err_create_fdls_frame_recv_cache;
|
||||
}
|
||||
|
||||
fnic_event_queue =
|
||||
alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
|
||||
if (!fnic_event_queue) {
|
||||
@@ -1339,6 +1359,8 @@ static int __init fnic_init_module(void)
|
||||
if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
|
||||
destroy_workqueue(reset_fnic_work_queue);
|
||||
err_create_reset_fnic_workq:
|
||||
kmem_cache_destroy(fdls_frame_recv_cache);
|
||||
err_create_fdls_frame_recv_cache:
|
||||
destroy_workqueue(fnic_event_queue);
|
||||
err_create_fnic_workq:
|
||||
kmem_cache_destroy(fdls_frame_elem_cache);
|
||||
|
||||
@@ -471,7 +471,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
|
||||
int sg_count = 0;
|
||||
unsigned long flags = 0;
|
||||
unsigned long ptr;
|
||||
int io_lock_acquired = 0;
|
||||
uint16_t hwq = 0;
|
||||
struct fnic_tport_s *tport = NULL;
|
||||
struct rport_dd_data_s *rdd_data;
|
||||
@@ -636,7 +635,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
|
||||
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
|
||||
|
||||
/* initialize rest of io_req */
|
||||
io_lock_acquired = 1;
|
||||
io_req->port_id = rport->port_id;
|
||||
io_req->start_time = jiffies;
|
||||
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
|
||||
@@ -689,6 +687,9 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
|
||||
/* REVISIT: Use per IO lock in the final code */
|
||||
fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
|
||||
|
||||
out:
|
||||
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
|
||||
(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
|
||||
@@ -699,10 +700,6 @@ enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
|
||||
mqtag, sc, io_req, sg_count, cmd_trace,
|
||||
fnic_flags_and_state(sc));
|
||||
|
||||
/* if only we issued IO, will we have the io lock */
|
||||
if (io_lock_acquired)
|
||||
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
|
||||
|
||||
atomic_dec(&fnic->in_flight);
|
||||
atomic_dec(&tport->in_flight);
|
||||
|
||||
@@ -777,7 +774,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
|
||||
*/
|
||||
if (ret) {
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
fnic_free_txq(&fnic->tx_queue);
|
||||
fnic_free_txq(fnic);
|
||||
goto reset_cmpl_handler_end;
|
||||
}
|
||||
|
||||
@@ -1972,15 +1969,11 @@ void fnic_scsi_unload(struct fnic *fnic)
|
||||
*/
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
|
||||
if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
|
||||
fnic_scsi_fcpio_reset(fnic);
|
||||
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
fnic->in_remove = 1;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
|
||||
fnic_fcpio_reset(fnic);
|
||||
|
||||
fnic_flush_tport_event_list(fnic);
|
||||
fnic_delete_fcp_tports(fnic);
|
||||
}
|
||||
@@ -3040,54 +3033,3 @@ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
|
||||
ret = fnic_host_reset(shost);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
void fnic_scsi_fcpio_reset(struct fnic *fnic)
|
||||
{
|
||||
unsigned long flags;
|
||||
enum fnic_state old_state;
|
||||
struct fnic_iport_s *iport = &fnic->iport;
|
||||
DECLARE_COMPLETION_ONSTACK(fw_reset_done);
|
||||
int time_remain;
|
||||
|
||||
/* issue fw reset */
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
|
||||
/* fw reset is in progress, poll for its completion */
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"fnic is in unexpected state: %d for fw_reset\n",
|
||||
fnic->state);
|
||||
return;
|
||||
}
|
||||
|
||||
old_state = fnic->state;
|
||||
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
|
||||
|
||||
fnic_update_mac_locked(fnic, iport->hwmac);
|
||||
fnic->fw_reset_done = &fw_reset_done;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Issuing fw reset\n");
|
||||
if (fnic_fw_reset_handler(fnic)) {
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
|
||||
fnic->state = old_state;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
} else {
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Waiting for fw completion\n");
|
||||
time_remain = wait_for_completion_timeout(&fw_reset_done,
|
||||
msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"Woken up after fw completion timeout\n");
|
||||
if (time_remain == 0) {
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"FW reset completion timed out after %d ms)\n",
|
||||
FNIC_FW_RESET_TIMEOUT);
|
||||
}
|
||||
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
|
||||
}
|
||||
fnic->fw_reset_done = NULL;
|
||||
}
|
||||
|
||||
@@ -1326,7 +1326,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
|
||||
|
||||
if (sts && !wait_for_completion_timeout(&completion,
|
||||
HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
|
||||
dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
|
||||
dev_warn(dev, "phy%d wait phyup timed out for func %u\n",
|
||||
phy_no, func);
|
||||
if (phy->in_reset)
|
||||
ret = -ETIMEDOUT;
|
||||
|
||||
@@ -432,7 +432,7 @@
|
||||
#define CMPLT_HDR_IPTT_OFF 0
|
||||
#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
|
||||
#define CMPLT_HDR_DEV_ID_OFF 16
|
||||
#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
|
||||
#define CMPLT_HDR_DEV_ID_MSK (0xffffU << CMPLT_HDR_DEV_ID_OFF)
|
||||
/* dw3 */
|
||||
#define SATA_DISK_IN_ERROR_STATUS_OFF 8
|
||||
#define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF)
|
||||
@@ -444,7 +444,7 @@
|
||||
#define FIS_ATA_STATUS_ERR_OFF 18
|
||||
#define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF)
|
||||
#define FIS_TYPE_SDB_OFF 31
|
||||
#define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF)
|
||||
#define FIS_TYPE_SDB_MSK (0x1U << FIS_TYPE_SDB_OFF)
|
||||
|
||||
/* ITCT header */
|
||||
/* qw0 */
|
||||
@@ -896,7 +896,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
|
||||
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
|
||||
break;
|
||||
default:
|
||||
dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
|
||||
dev_warn(dev, "setup itct: unsupported dev type (%u)\n",
|
||||
sas_dev->dev_type);
|
||||
}
|
||||
|
||||
@@ -2847,7 +2847,7 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
|
||||
static ssize_t intr_conv_v3_hw_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
|
||||
return scnprintf(buf, PAGE_SIZE, "%d\n", hisi_sas_intr_conv);
|
||||
}
|
||||
static DEVICE_ATTR_RO(intr_conv_v3_hw);
|
||||
|
||||
@@ -3293,7 +3293,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
|
||||
u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
||||
dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
|
||||
dev_info(dev, "BIST info:phy%u link_rate=%u code_mode=%u path_mode=%u ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
|
||||
phy_no, linkrate, code_mode, path_mode,
|
||||
ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
|
||||
ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
|
||||
@@ -3650,7 +3650,7 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < reg->count; i++) {
|
||||
int off = i * HISI_SAS_REG_MEM_SIZE;
|
||||
u32 off = i * HISI_SAS_REG_MEM_SIZE;
|
||||
const char *name;
|
||||
|
||||
name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
|
||||
|
||||
@@ -164,7 +164,7 @@ struct bmic_controller_parameters {
|
||||
struct ctlr_info {
|
||||
unsigned int *reply_map;
|
||||
int ctlr;
|
||||
char devname[8];
|
||||
char devname[16];
|
||||
char *product_name;
|
||||
struct pci_dev *pdev;
|
||||
u32 board_id;
|
||||
@@ -255,7 +255,7 @@ struct ctlr_info {
|
||||
int remove_in_progress;
|
||||
/* Address of h->q[x] is passed to intr handler to know which queue */
|
||||
u8 q[MAX_REPLY_QUEUES];
|
||||
char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
|
||||
char intrname[MAX_REPLY_QUEUES][32]; /* controller and IRQ names */
|
||||
u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
|
||||
#define HPSATMF_BITS_SUPPORTED (1 << 0)
|
||||
#define HPSATMF_PHYS_LUN_RESET (1 << 1)
|
||||
|
||||
@@ -3968,6 +3968,7 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
|
||||
|
||||
.tfc_wwn_attrs = ibmvscsis_wwn_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -267,7 +267,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
|
||||
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
|
||||
struct sock *sk = tcp_sw_conn->sock->sk;
|
||||
|
||||
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
|
||||
/* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
sk->sk_user_data = NULL;
|
||||
sk->sk_data_ready = tcp_sw_conn->old_data_ready;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -552,8 +552,6 @@ struct lpfc_cgn_info {
|
||||
);
|
||||
|
||||
__le32 cgn_info_crc;
|
||||
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
|
||||
#define LPFC_CGN_CRC32_SEED 0xFFFFFFFF
|
||||
};
|
||||
|
||||
#define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \
|
||||
@@ -812,9 +810,10 @@ struct unsol_rcv_ct_ctx {
|
||||
#define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */
|
||||
#define LPFC_USER_LINK_SPEED_32G 32 /* 32 Gigabaud */
|
||||
#define LPFC_USER_LINK_SPEED_64G 64 /* 64 Gigabaud */
|
||||
#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_64G
|
||||
#define LPFC_USER_LINK_SPEED_128G 128 /* 128 Gigabaud */
|
||||
#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_128G
|
||||
|
||||
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64"
|
||||
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64, 128"
|
||||
|
||||
enum nemb_type {
|
||||
nemb_mse = 1,
|
||||
@@ -1017,7 +1016,6 @@ struct lpfc_hba {
|
||||
#define LPFC_SLI3_CRP_ENABLED 0x08
|
||||
#define LPFC_SLI3_BG_ENABLED 0x20
|
||||
#define LPFC_SLI3_DSS_ENABLED 0x40
|
||||
#define LPFC_SLI4_PERFH_ENABLED 0x80
|
||||
#define LPFC_SLI4_PHWQ_ENABLED 0x100
|
||||
uint32_t iocb_cmd_size;
|
||||
uint32_t iocb_rsp_size;
|
||||
@@ -1190,7 +1188,6 @@ struct lpfc_hba {
|
||||
uint32_t cfg_ras_fwlog_func;
|
||||
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
|
||||
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
|
||||
uint32_t cfg_enable_pbde;
|
||||
uint32_t cfg_enable_mi;
|
||||
struct nvmet_fc_target_port *targetport;
|
||||
lpfc_vpd_t vpd; /* vital product data */
|
||||
@@ -1667,8 +1664,9 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
|
||||
* @mask: Pointer to phba's cpumask member.
|
||||
* @start: starting cpu index
|
||||
*
|
||||
* Note: If no valid cpu found, then nr_cpu_ids is returned.
|
||||
* Returns: next online CPU in @mask on success
|
||||
*
|
||||
* Note: If no valid cpu found, then nr_cpu_ids is returned.
|
||||
**/
|
||||
static __always_inline unsigned int
|
||||
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
|
||||
@@ -1680,8 +1678,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
|
||||
* lpfc_next_present_cpu - Finds next present CPU after n
|
||||
* @n: the cpu prior to search
|
||||
*
|
||||
* Note: If no next present cpu, then fallback to first present cpu.
|
||||
* Returns: next present CPU after CPU @n
|
||||
*
|
||||
* Note: If no next present cpu, then fallback to first present cpu.
|
||||
**/
|
||||
static __always_inline unsigned int lpfc_next_present_cpu(int n)
|
||||
{
|
||||
@@ -1691,7 +1690,7 @@ static __always_inline unsigned int lpfc_next_present_cpu(int n)
|
||||
/**
|
||||
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @q: The Event Queue to update.
|
||||
* @eq: The Event Queue to update.
|
||||
* @delay: The delay value (in us) to be written.
|
||||
*
|
||||
**/
|
||||
@@ -1753,8 +1752,9 @@ static const char *routine(enum enum_name table_key) \
|
||||
* Pr Tag 1 0 N
|
||||
* Pr Tag 1 1 Y
|
||||
* Pr Tag 2 * Y
|
||||
---------------------------------------------------
|
||||
* ---------------------------------------------------
|
||||
*
|
||||
* Returns: whether VMID is enabled
|
||||
**/
|
||||
static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
|
||||
{
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.broadcom.com *
|
||||
@@ -4415,7 +4415,7 @@ static DEVICE_ATTR_RO(lpfc_static_vport);
|
||||
/*
|
||||
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
|
||||
# connection.
|
||||
# Value range is [0,16]. Default value is 0.
|
||||
# Value range is [0,128]. Default value is 0.
|
||||
*/
|
||||
/**
|
||||
* lpfc_link_speed_store - Set the adapters link speed
|
||||
@@ -4468,14 +4468,15 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
|
||||
"3055 lpfc_link_speed changed from %d to %d %s\n",
|
||||
phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
|
||||
|
||||
if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
|
||||
if ((val == LPFC_USER_LINK_SPEED_1G && !(phba->lmt & LMT_1Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_2G && !(phba->lmt & LMT_2Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_4G && !(phba->lmt & LMT_4Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_8G && !(phba->lmt & LMT_8Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_10G && !(phba->lmt & LMT_10Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_16G && !(phba->lmt & LMT_16Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_32G && !(phba->lmt & LMT_32Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_64G && !(phba->lmt & LMT_64Gb)) ||
|
||||
(val == LPFC_USER_LINK_SPEED_128G && !(phba->lmt & LMT_128Gb))) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2879 lpfc_link_speed attribute cannot be set "
|
||||
"to %d. Speed is not supported by this port.\n",
|
||||
@@ -4500,6 +4501,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
|
||||
case LPFC_USER_LINK_SPEED_16G:
|
||||
case LPFC_USER_LINK_SPEED_32G:
|
||||
case LPFC_USER_LINK_SPEED_64G:
|
||||
case LPFC_USER_LINK_SPEED_128G:
|
||||
prev_val = phba->cfg_link_speed;
|
||||
phba->cfg_link_speed = val;
|
||||
if (nolip)
|
||||
@@ -4564,6 +4566,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
|
||||
case LPFC_USER_LINK_SPEED_16G:
|
||||
case LPFC_USER_LINK_SPEED_32G:
|
||||
case LPFC_USER_LINK_SPEED_64G:
|
||||
case LPFC_USER_LINK_SPEED_128G:
|
||||
phba->cfg_link_speed = val;
|
||||
return 0;
|
||||
default:
|
||||
@@ -7467,8 +7470,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
|
||||
|
||||
phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
|
||||
|
||||
phba->cfg_enable_pbde = 0;
|
||||
|
||||
/* A value of 0 means use the number of CPUs found in the system */
|
||||
if (phba->cfg_hdw_queue == 0)
|
||||
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -86,7 +86,7 @@ void lpfc_cmf_stop(struct lpfc_hba *phba);
|
||||
void lpfc_init_congestion_stat(struct lpfc_hba *phba);
|
||||
void lpfc_init_congestion_buf(struct lpfc_hba *phba);
|
||||
int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
|
||||
uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
|
||||
uint32_t lpfc_cgn_calc_crc32(const void *data, size_t size);
|
||||
int lpfc_config_cgn_signal(struct lpfc_hba *phba);
|
||||
int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
|
||||
void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
|
||||
@@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void);
|
||||
void lpfc_nvmet_cmd_template(void);
|
||||
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
uint32_t stat, uint32_t param);
|
||||
void lpfc_nvme_flush_abts_list(struct lpfc_hba *phba);
|
||||
void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba);
|
||||
extern int lpfc_enable_nvmet_cnt;
|
||||
extern unsigned long long lpfc_enable_nvmet[];
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -2427,13 +2427,14 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
|
||||
/* CGN is only for the physical port, no vports */
|
||||
if (lpfc_fdmi_cmd(vport, ndlp, cmd,
|
||||
LPFC_FDMI_VENDOR_ATTR_mi) == 0)
|
||||
LPFC_FDMI_VENDOR_ATTR_mi) == 0) {
|
||||
phba->link_flag |= LS_CT_VEN_RPA;
|
||||
lpfc_printf_log(phba, KERN_INFO,
|
||||
lpfc_printf_log(phba, KERN_INFO,
|
||||
LOG_DISCOVERY | LOG_ELS,
|
||||
"6458 Send MI FDMI:%x Flag x%x\n",
|
||||
phba->sli4_hba.pc_sli4_params.mi_ver,
|
||||
phba->link_flag);
|
||||
}
|
||||
} else {
|
||||
lpfc_printf_log(phba, KERN_INFO,
|
||||
LOG_DISCOVERY | LOG_ELS,
|
||||
@@ -3214,7 +3215,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
struct lpfc_iocbq *rspiocb);
|
||||
|
||||
if (!ndlp)
|
||||
return 0;
|
||||
goto fdmi_cmd_exit;
|
||||
|
||||
cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
|
||||
|
||||
@@ -3320,7 +3321,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
if (vport->port_type != LPFC_PHYSICAL_PORT) {
|
||||
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
|
||||
if (!ndlp)
|
||||
return 0;
|
||||
goto fdmi_cmd_free_rspvirt;
|
||||
}
|
||||
fallthrough;
|
||||
case SLI_MGMT_RPA:
|
||||
@@ -3396,7 +3397,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
if (vport->port_type != LPFC_PHYSICAL_PORT) {
|
||||
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
|
||||
if (!ndlp)
|
||||
return 0;
|
||||
goto fdmi_cmd_free_rspvirt;
|
||||
}
|
||||
fallthrough;
|
||||
case SLI_MGMT_DPA:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -137,7 +137,8 @@ struct lpfc_nodelist {
|
||||
uint16_t nlp_maxframe; /* Max RCV frame size */
|
||||
uint8_t nlp_class_sup; /* Supported Classes */
|
||||
uint8_t nlp_retry; /* used for ELS retries */
|
||||
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
|
||||
uint8_t nlp_fcp_info; /* class info, bits 0-2 */
|
||||
#define NLP_FCP_CLASS_MASK 0x07 /* class info bitmask */
|
||||
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
|
||||
u8 nlp_nvme_info; /* NVME NSLER Support */
|
||||
uint8_t vmid_support; /* destination VMID support */
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -1107,7 +1107,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
vport->vmid_flag = 0;
|
||||
}
|
||||
if (sp->cmn.priority_tagging)
|
||||
vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
|
||||
vport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
|
||||
LPFC_VMID_TYPE_PRIO);
|
||||
|
||||
/*
|
||||
@@ -1303,8 +1303,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
|
||||
ndlp->nlp_DID, ELS_CMD_FLOGI);
|
||||
|
||||
if (!elsiocb)
|
||||
if (!elsiocb) {
|
||||
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY,
|
||||
"4296 Unable to prepare FLOGI iocb\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
wqe = &elsiocb->wqe;
|
||||
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
|
||||
@@ -1394,10 +1398,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
phba->sli3_options, 0, 0);
|
||||
|
||||
elsiocb->ndlp = lpfc_nlp_get(ndlp);
|
||||
if (!elsiocb->ndlp) {
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
return 1;
|
||||
}
|
||||
if (!elsiocb->ndlp)
|
||||
goto err_out;
|
||||
|
||||
/* Avoid race with FLOGI completion and hba_flags. */
|
||||
set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
|
||||
@@ -1407,9 +1409,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
if (rc == IOCB_ERROR) {
|
||||
clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
|
||||
clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
lpfc_nlp_put(ndlp);
|
||||
return 1;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Clear external loopback plug detected flag */
|
||||
@@ -1474,6 +1475,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY,
|
||||
"4297 Issue FLOGI: Cannot send IOCB\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2641,7 +2649,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
}
|
||||
npr->estabImagePair = 1;
|
||||
npr->readXferRdyDis = 1;
|
||||
if (vport->cfg_first_burst_size)
|
||||
if (phba->sli_rev == LPFC_SLI_REV4 &&
|
||||
!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
|
||||
vport->cfg_first_burst_size)
|
||||
npr->writeXferRdyDis = 1;
|
||||
|
||||
/* For FCP support */
|
||||
@@ -4319,18 +4329,28 @@ lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
|
||||
static bool
|
||||
lpfc_link_is_lds_capable(struct lpfc_hba *phba)
|
||||
{
|
||||
if (!(phba->lmt & LMT_64Gb))
|
||||
if (!(phba->lmt & (LMT_64Gb | LMT_128Gb)))
|
||||
return false;
|
||||
if (phba->sli_rev != LPFC_SLI_REV4)
|
||||
return false;
|
||||
|
||||
if (phba->sli4_hba.conf_trunk) {
|
||||
if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
|
||||
switch (phba->trunk_link.phy_lnk_speed) {
|
||||
case LPFC_USER_LINK_SPEED_128G:
|
||||
case LPFC_USER_LINK_SPEED_64G:
|
||||
return true;
|
||||
} else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
switch (phba->fc_linkspeed) {
|
||||
case LPFC_LINK_SPEED_128GHZ:
|
||||
case LPFC_LINK_SPEED_64GHZ:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -10291,10 +10311,8 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
|
||||
cpu_to_le16(value);
|
||||
cp->cgn_warn_freq =
|
||||
cpu_to_le16(value);
|
||||
crc = lpfc_cgn_calc_crc32
|
||||
(cp,
|
||||
LPFC_CGN_INFO_SZ,
|
||||
LPFC_CGN_CRC32_SEED);
|
||||
crc = lpfc_cgn_calc_crc32(
|
||||
cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(crc);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -425,7 +425,6 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
|
||||
{
|
||||
if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
|
||||
clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
|
||||
lpfc_nlp_get(ndlp);
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
|
||||
"8438 Devloss timeout reversed on DID x%x "
|
||||
"refcnt %d ndlp %p flag x%lx "
|
||||
@@ -3174,7 +3173,11 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
return;
|
||||
}
|
||||
|
||||
lpfc_initial_flogi(vport);
|
||||
if (!lpfc_initial_flogi(vport)) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_ELS,
|
||||
"2345 Can't issue initial FLOGI\n");
|
||||
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
||||
}
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return;
|
||||
}
|
||||
@@ -3247,8 +3250,14 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
return;
|
||||
}
|
||||
|
||||
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
|
||||
lpfc_initial_fdisc(vport);
|
||||
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
|
||||
if (!lpfc_initial_fdisc(vport)) {
|
||||
lpfc_printf_vlog(vport, KERN_WARNING,
|
||||
LOG_MBOX | LOG_ELS,
|
||||
"2346 Can't issue initial FDISC\n");
|
||||
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
||||
}
|
||||
}
|
||||
else {
|
||||
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
||||
@@ -3808,7 +3817,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
if (phba->cmf_active_mode != LPFC_CFG_OFF)
|
||||
lpfc_cmf_signal_init(phba);
|
||||
|
||||
if (phba->lmt & LMT_64Gb)
|
||||
if (phba->lmt & (LMT_64Gb | LMT_128Gb))
|
||||
lpfc_read_lds_params(phba);
|
||||
|
||||
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
|
||||
@@ -4401,7 +4410,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
LOG_INIT | LOG_ELS | LOG_DISCOVERY,
|
||||
"4220 Issue EDC status x%x Data x%x\n",
|
||||
rc, phba->cgn_init_reg_signal);
|
||||
} else if (phba->lmt & LMT_64Gb) {
|
||||
} else if (phba->lmt & (LMT_64Gb | LMT_128Gb)) {
|
||||
/* may send link fault capability descriptor */
|
||||
lpfc_issue_els_edc(vport, 0);
|
||||
} else {
|
||||
@@ -5228,12 +5237,11 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
|
||||
|
||||
/*
|
||||
* Free rpi associated with LPFC_NODELIST entry.
|
||||
* This routine is called from lpfc_freenode(), when we are removing
|
||||
* a LPFC_NODELIST entry. It is also called if the driver initiates a
|
||||
* LOGO that completes successfully, and we are waiting to PLOGI back
|
||||
* to the remote NPort. In addition, it is called after we receive
|
||||
* and unsolicated ELS cmd, send back a rsp, the rsp completes and
|
||||
* we are waiting to PLOGI back to the remote NPort.
|
||||
* This routine is called if the driver initiates a LOGO that completes
|
||||
* successfully, and we are waiting to PLOGI back to the remote NPort.
|
||||
* In addition, it is called after we receive and unsolicated ELS cmd,
|
||||
* send back a rsp, the rsp completes and we are waiting to PLOGI back
|
||||
* to the remote NPort.
|
||||
*/
|
||||
int
|
||||
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
@@ -6599,11 +6607,6 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
|
||||
unsigned long flags;
|
||||
|
||||
if (ndlp) {
|
||||
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
|
||||
"node get: did:x%x flg:x%lx refcnt:x%x",
|
||||
ndlp->nlp_DID, ndlp->nlp_flag,
|
||||
kref_read(&ndlp->kref));
|
||||
|
||||
/* The check of ndlp usage to prevent incrementing the
|
||||
* ndlp reference count that is in the process of being
|
||||
* released.
|
||||
@@ -6611,9 +6614,8 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
|
||||
spin_lock_irqsave(&ndlp->lock, flags);
|
||||
if (!kref_get_unless_zero(&ndlp->kref)) {
|
||||
spin_unlock_irqrestore(&ndlp->lock, flags);
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
|
||||
"0276 %s: ndlp:x%px refcnt:%d\n",
|
||||
__func__, (void *)ndlp, kref_read(&ndlp->kref));
|
||||
pr_info("0276 %s: NDLP x%px has zero reference count. "
|
||||
"Exiting\n", __func__, ndlp);
|
||||
return NULL;
|
||||
}
|
||||
spin_unlock_irqrestore(&ndlp->lock, flags);
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -1771,6 +1771,7 @@ struct lpfc_fdmi_reg_portattr {
|
||||
#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
|
||||
#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400
|
||||
#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
|
||||
#define PCI_DEVICE_ID_LANCER_G8_FC 0xd300
|
||||
#define PCI_DEVICE_ID_SAT_SMB 0xf011
|
||||
#define PCI_DEVICE_ID_SAT_MID 0xf015
|
||||
#define PCI_DEVICE_ID_RFLY 0xf095
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.broadcom.com *
|
||||
@@ -100,7 +100,8 @@ struct lpfc_sli_intf {
|
||||
#define lpfc_sli_intf_sli_family_MASK 0x0000000F
|
||||
#define lpfc_sli_intf_sli_family_WORD word0
|
||||
#define LPFC_SLI_INTF_FAMILY_BE2 0x0
|
||||
#define LPFC_SLI_INTF_FAMILY_BE3 0x1
|
||||
#define LPFC_SLI_INTF_ASIC_ID 0x1 /* Refer to ASIC_ID register */
|
||||
#define LPFC_SLI_INTF_FAMILY_BE3 0x3
|
||||
#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
|
||||
#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
|
||||
#define LPFC_SLI_INTF_FAMILY_G6 0xc
|
||||
@@ -118,6 +119,17 @@ struct lpfc_sli_intf {
|
||||
#define LPFC_SLI_INTF_IF_TYPE_VIRT 1
|
||||
};
|
||||
|
||||
struct lpfc_asic_id {
|
||||
u32 word0;
|
||||
#define lpfc_asic_id_gen_num_SHIFT 8
|
||||
#define lpfc_asic_id_gen_num_MASK 0x000000FF
|
||||
#define lpfc_asic_id_gen_num_WORD word0
|
||||
#define LPFC_SLI_INTF_FAMILY_G8 0x10
|
||||
#define lpfc_asic_id_rev_num_SHIFT 0
|
||||
#define lpfc_asic_id_rev_num_MASK 0x000000FF
|
||||
#define lpfc_asic_id_rev_num_WORD word0
|
||||
};
|
||||
|
||||
#define LPFC_SLI4_MBX_EMBED true
|
||||
#define LPFC_SLI4_MBX_NEMBED false
|
||||
|
||||
@@ -624,6 +636,10 @@ struct lpfc_register {
|
||||
|
||||
#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000
|
||||
#define LPFC_PORT_SEM_MASK 0xF000
|
||||
|
||||
/* The following are config space register offsets */
|
||||
#define LPFC_ASIC_ID_OFFSET 0x0308
|
||||
|
||||
/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
|
||||
#define LPFC_UERR_STATUS_HI 0x00A4
|
||||
#define LPFC_UERR_STATUS_LO 0x00A0
|
||||
@@ -632,7 +648,6 @@ struct lpfc_register {
|
||||
|
||||
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
|
||||
#define LPFC_SLI_INTF 0x0058
|
||||
#define LPFC_SLI_ASIC_VER 0x009C
|
||||
|
||||
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
|
||||
#define lpfc_port_smphr_perr_SHIFT 31
|
||||
@@ -3062,9 +3077,6 @@ struct lpfc_mbx_request_features {
|
||||
#define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9
|
||||
#define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rq_iaar_WORD word2
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
|
||||
#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
|
||||
#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
|
||||
@@ -3096,9 +3108,6 @@ struct lpfc_mbx_request_features {
|
||||
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
|
||||
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
|
||||
#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
|
||||
#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
|
||||
@@ -3461,10 +3470,6 @@ struct lpfc_sli4_parameters {
|
||||
#define cfg_pvl_MASK 0x00000001
|
||||
#define cfg_pvl_WORD word19
|
||||
|
||||
#define cfg_pbde_SHIFT 20
|
||||
#define cfg_pbde_MASK 0x00000001
|
||||
#define cfg_pbde_WORD word19
|
||||
|
||||
uint32_t word20;
|
||||
#define cfg_max_tow_xri_SHIFT 0
|
||||
#define cfg_max_tow_xri_MASK 0x0000ffff
|
||||
@@ -4484,9 +4489,6 @@ struct wqe_common {
|
||||
#define wqe_irsp_SHIFT 4
|
||||
#define wqe_irsp_MASK 0x00000001
|
||||
#define wqe_irsp_WORD word11
|
||||
#define wqe_pbde_SHIFT 5
|
||||
#define wqe_pbde_MASK 0x00000001
|
||||
#define wqe_pbde_WORD word11
|
||||
#define wqe_sup_SHIFT 6
|
||||
#define wqe_sup_MASK 0x00000001
|
||||
#define wqe_sup_WORD word11
|
||||
@@ -4978,6 +4980,7 @@ union lpfc_wqe128 {
|
||||
#define MAGIC_NUMBER_G6 0xFEAA0003
|
||||
#define MAGIC_NUMBER_G7 0xFEAA0005
|
||||
#define MAGIC_NUMBER_G7P 0xFEAA0020
|
||||
#define MAGIC_NUMBER_G8 0xFEAA0070
|
||||
|
||||
struct lpfc_grp_hdr {
|
||||
uint32_t size;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = {
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G8_FC,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.broadcom.com *
|
||||
@@ -22,6 +22,7 @@
|
||||
*******************************************************************/
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/idr.h>
|
||||
@@ -788,7 +789,9 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
|
||||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
|
||||
!(phba->lmt & LMT_32Gb)) ||
|
||||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
|
||||
!(phba->lmt & LMT_64Gb))) {
|
||||
!(phba->lmt & LMT_64Gb)) ||
|
||||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_128G) &&
|
||||
!(phba->lmt & LMT_128Gb))) {
|
||||
/* Reset link speed to auto */
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"1302 Invalid speed for this board:%d "
|
||||
@@ -1087,7 +1090,6 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
|
||||
struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
|
||||
struct lpfc_sli4_hdw_queue *qp;
|
||||
LIST_HEAD(aborts);
|
||||
LIST_HEAD(nvme_aborts);
|
||||
LIST_HEAD(nvmet_aborts);
|
||||
struct lpfc_sglq *sglq_entry = NULL;
|
||||
int cnt, idx;
|
||||
@@ -1946,6 +1948,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
|
||||
|
||||
lpfc_offline_prep(phba, mbx_action);
|
||||
lpfc_sli_flush_io_rings(phba);
|
||||
lpfc_nvme_flush_abts_list(phba);
|
||||
lpfc_nvmels_flush_cmd(phba);
|
||||
lpfc_offline(phba);
|
||||
/* release interrupt for possible resource change */
|
||||
@@ -2534,7 +2537,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
||||
return;
|
||||
}
|
||||
|
||||
if (phba->lmt & LMT_64Gb)
|
||||
if (phba->lmt & LMT_128Gb)
|
||||
max_speed = 128;
|
||||
else if (phba->lmt & LMT_64Gb)
|
||||
max_speed = 64;
|
||||
else if (phba->lmt & LMT_32Gb)
|
||||
max_speed = 32;
|
||||
@@ -2752,6 +2757,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
||||
case PCI_DEVICE_ID_LANCER_G7P_FC:
|
||||
m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
|
||||
break;
|
||||
case PCI_DEVICE_ID_LANCER_G8_FC:
|
||||
m = (typeof(m)){"LPe42100", "PCIe", "Fibre Channel Adapter"};
|
||||
break;
|
||||
case PCI_DEVICE_ID_SKYHAWK:
|
||||
case PCI_DEVICE_ID_SKYHAWK_VF:
|
||||
oneConnect = 1;
|
||||
@@ -5634,8 +5642,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
|
||||
cp->cgn_stat_npm = value;
|
||||
}
|
||||
|
||||
value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
|
||||
LPFC_CGN_CRC32_SEED);
|
||||
value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(value);
|
||||
}
|
||||
|
||||
@@ -5897,8 +5904,7 @@ lpfc_cmf_stats_timer(struct hrtimer *timer)
|
||||
cp->cgn_warn_freq = cpu_to_le16(value);
|
||||
cp->cgn_alarm_freq = cpu_to_le16(value);
|
||||
|
||||
lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
|
||||
LPFC_CGN_CRC32_SEED);
|
||||
lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(lvalue);
|
||||
|
||||
hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));
|
||||
@@ -7121,8 +7127,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
|
||||
cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
|
||||
cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
|
||||
cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
|
||||
LPFC_CGN_CRC32_SEED);
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(crc);
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
@@ -8283,7 +8288,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
||||
phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
|
||||
phba->cfg_nvme_seg_cnt);
|
||||
|
||||
i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
|
||||
i = min_t(u32, phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
|
||||
|
||||
phba->lpfc_sg_dma_buf_pool =
|
||||
dma_pool_create("lpfc_sg_dma_buf_pool",
|
||||
@@ -10146,6 +10151,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
||||
phba->cfg_link_speed =
|
||||
LPFC_USER_LINK_SPEED_64G;
|
||||
break;
|
||||
case LINK_SPEED_128G:
|
||||
phba->cfg_link_speed =
|
||||
LPFC_USER_LINK_SPEED_128G;
|
||||
break;
|
||||
case 0xffff:
|
||||
phba->cfg_link_speed =
|
||||
LPFC_USER_LINK_SPEED_AUTO;
|
||||
@@ -11795,6 +11804,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
|
||||
unsigned long bar0map_len, bar1map_len, bar2map_len;
|
||||
int error;
|
||||
uint32_t if_type;
|
||||
u8 sli_family;
|
||||
|
||||
if (!pdev)
|
||||
return -ENODEV;
|
||||
@@ -11825,6 +11835,14 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Check if ASIC_ID register should be read */
|
||||
sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
|
||||
if (sli_family == LPFC_SLI_INTF_ASIC_ID) {
|
||||
if (pci_read_config_dword(pdev, LPFC_ASIC_ID_OFFSET,
|
||||
&phba->sli4_hba.asic_id.word0))
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
|
||||
/*
|
||||
* Get the bus address of SLI4 device Bar regions and the
|
||||
@@ -13043,6 +13061,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
||||
/* Iterate to next offline or online cpu in aff_mask */
|
||||
cpu = cpumask_next(cpu, aff_mask);
|
||||
|
||||
/* Reached the end of the aff_mask */
|
||||
if (cpu >= nr_cpu_ids)
|
||||
break;
|
||||
|
||||
/* Find next online cpu in aff_mask to set affinity */
|
||||
cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
|
||||
} else if (vectors == 1) {
|
||||
@@ -13495,54 +13517,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
||||
phba->pport->work_port_events = 0;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
lpfc_cgn_crc32(uint32_t crc, u8 byte)
|
||||
{
|
||||
uint32_t msb = 0;
|
||||
uint32_t bit;
|
||||
|
||||
for (bit = 0; bit < 8; bit++) {
|
||||
msb = (crc >> 31) & 1;
|
||||
crc <<= 1;
|
||||
|
||||
if (msb ^ (byte & 1)) {
|
||||
crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
|
||||
crc |= 1;
|
||||
}
|
||||
byte >>= 1;
|
||||
}
|
||||
return crc;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
lpfc_cgn_reverse_bits(uint32_t wd)
|
||||
{
|
||||
uint32_t result = 0;
|
||||
uint32_t i;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
result <<= 1;
|
||||
result |= (1 & (wd >> i));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* The routine corresponds with the algorithm the HBA firmware
|
||||
* uses to validate the data integrity.
|
||||
*/
|
||||
uint32_t
|
||||
lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
|
||||
lpfc_cgn_calc_crc32(const void *data, size_t size)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t result;
|
||||
uint8_t *data = (uint8_t *)ptr;
|
||||
|
||||
for (i = 0; i < byteLen; ++i)
|
||||
crc = lpfc_cgn_crc32(crc, data[i]);
|
||||
|
||||
result = ~lpfc_cgn_reverse_bits(crc);
|
||||
return result;
|
||||
return ~crc32c(~0, data, size);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -13591,7 +13573,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
|
||||
|
||||
cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
|
||||
cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(crc);
|
||||
|
||||
phba->cgn_evt_timestamp = jiffies +
|
||||
@@ -13614,7 +13596,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
|
||||
memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
|
||||
|
||||
lpfc_cgn_update_tstamp(phba, &cp->stat_start);
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
|
||||
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
|
||||
cp->cgn_info_crc = cpu_to_le32(crc);
|
||||
}
|
||||
|
||||
@@ -13756,7 +13738,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
|
||||
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
|
||||
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
|
||||
sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
|
||||
sli4_params->rqv =
|
||||
(sli4_params->if_type < LPFC_SLI_INTF_IF_TYPE_2) ?
|
||||
LPFC_Q_CREATE_VERSION_0 : LPFC_Q_CREATE_VERSION_1;
|
||||
sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
|
||||
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
|
||||
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
|
||||
@@ -13818,12 +13802,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
|
||||
|
||||
/* Enable embedded Payload BDE if support is indicated */
|
||||
if (bf_get(cfg_pbde, mbx_sli4_parameters))
|
||||
phba->cfg_enable_pbde = 1;
|
||||
else
|
||||
phba->cfg_enable_pbde = 0;
|
||||
|
||||
/*
|
||||
* To support Suppress Response feature we must satisfy 3 conditions.
|
||||
* lpfc_suppress_rsp module parameter must be set (default).
|
||||
@@ -13858,9 +13836,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
phba->fcp_embed_io = 0;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
|
||||
"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
|
||||
"6422 XIB %d: FCP %d NVME %d %d %d\n",
|
||||
bf_get(cfg_xib, mbx_sli4_parameters),
|
||||
phba->cfg_enable_pbde,
|
||||
phba->fcp_embed_io, sli4_params->nvme,
|
||||
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
|
||||
|
||||
@@ -14525,6 +14502,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
|
||||
u8 sli_family;
|
||||
|
||||
sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
|
||||
|
||||
/* Refer to ASIC_ID register case */
|
||||
if (sli_family == LPFC_SLI_INTF_ASIC_ID)
|
||||
sli_family = bf_get(lpfc_asic_id_gen_num,
|
||||
&phba->sli4_hba.asic_id);
|
||||
|
||||
/* Three cases: (1) FW was not supported on the detected adapter.
|
||||
* (2) FW update has been locked out administratively.
|
||||
* (3) Some other error during FW update.
|
||||
@@ -14537,7 +14520,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
|
||||
(sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
|
||||
magic_number != MAGIC_NUMBER_G7) ||
|
||||
(sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
|
||||
magic_number != MAGIC_NUMBER_G7P)) {
|
||||
magic_number != MAGIC_NUMBER_G7P) ||
|
||||
(sli_family == LPFC_SLI_INTF_FAMILY_G8 &&
|
||||
magic_number != MAGIC_NUMBER_G8)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"3030 This firmware version is not supported on"
|
||||
" this HBA model. Device:%x Magic:%x Type:%x "
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -625,6 +625,10 @@ lpfc_init_link(struct lpfc_hba * phba,
|
||||
mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
|
||||
mb->un.varInitLnk.link_speed = LINK_SPEED_64G;
|
||||
break;
|
||||
case LPFC_USER_LINK_SPEED_128G:
|
||||
mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
|
||||
mb->un.varInitLnk.link_speed = LINK_SPEED_128G;
|
||||
break;
|
||||
case LPFC_USER_LINK_SPEED_AUTO:
|
||||
default:
|
||||
mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
|
||||
@@ -2139,7 +2143,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
|
||||
|
||||
/* Set up host requested features. */
|
||||
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
|
||||
bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
|
||||
|
||||
/* Enable DIF (block guard) only if configured to do so. */
|
||||
if (phba->cfg_enable_bg)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -316,8 +316,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
|
||||
struct lpfc_iocbq *save_iocb;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
MAILBOX_t *mb = &login_mbox->u.mb;
|
||||
|
||||
int rc;
|
||||
int rc = 0;
|
||||
|
||||
ndlp = login_mbox->ctx_ndlp;
|
||||
save_iocb = login_mbox->ctx_u.save_iocb;
|
||||
@@ -346,7 +345,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
|
||||
* completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent
|
||||
* before the PRLI.
|
||||
*/
|
||||
if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
|
||||
if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag) || mb->mbxStatus || rc) {
|
||||
/* Now process the REG_RPI cmpl */
|
||||
lpfc_mbx_cmpl_reg_login(phba, login_mbox);
|
||||
clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
|
||||
@@ -525,13 +524,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
/* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
|
||||
* to account for updated TOV's / parameters
|
||||
*/
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_issue_reg_vfi(vport);
|
||||
else {
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
rc = lpfc_issue_reg_vfi(vport);
|
||||
} else {
|
||||
link_mbox = mempool_alloc(phba->mbox_mem_pool,
|
||||
GFP_KERNEL);
|
||||
if (!link_mbox)
|
||||
goto out;
|
||||
goto rsp_rjt;
|
||||
lpfc_config_link(phba, link_mbox);
|
||||
link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
link_mbox->vport = vport;
|
||||
@@ -544,11 +543,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
|
||||
if (rc == MBX_NOT_FINISHED) {
|
||||
mempool_free(link_mbox, phba->mbox_mem_pool);
|
||||
goto out;
|
||||
goto rsp_rjt;
|
||||
}
|
||||
}
|
||||
|
||||
lpfc_can_disctmo(vport);
|
||||
if (rc)
|
||||
goto rsp_rjt;
|
||||
}
|
||||
|
||||
clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag);
|
||||
@@ -562,11 +563,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
|
||||
login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!login_mbox)
|
||||
goto out;
|
||||
goto rsp_rjt;
|
||||
|
||||
save_iocb = kzalloc_obj(*save_iocb);
|
||||
if (!save_iocb)
|
||||
goto out;
|
||||
goto free_login_mbox;
|
||||
|
||||
/* Save info from cmd IOCB to be used in rsp after all mbox completes */
|
||||
memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
|
||||
@@ -586,7 +587,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
|
||||
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
|
||||
if (rc)
|
||||
goto out;
|
||||
goto free_save_iocb;
|
||||
|
||||
login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
|
||||
login_mbox->vport = vport;
|
||||
@@ -659,7 +660,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
|
||||
login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
|
||||
if (!login_mbox->ctx_ndlp)
|
||||
goto out;
|
||||
goto free_save_iocb;
|
||||
|
||||
login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */
|
||||
|
||||
@@ -670,16 +671,17 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
|
||||
if (rc == MBX_NOT_FINISHED) {
|
||||
lpfc_nlp_put(ndlp);
|
||||
goto out;
|
||||
goto free_save_iocb;
|
||||
}
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
|
||||
|
||||
return 1;
|
||||
out:
|
||||
kfree(save_iocb);
|
||||
if (login_mbox)
|
||||
mempool_free(login_mbox, phba->mbox_mem_pool);
|
||||
|
||||
free_save_iocb:
|
||||
kfree(save_iocb);
|
||||
free_login_mbox:
|
||||
mempool_free(login_mbox, phba->mbox_mem_pool);
|
||||
rsp_rjt:
|
||||
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
|
||||
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
|
||||
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -1296,8 +1296,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
/* Word 10 */
|
||||
bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);
|
||||
|
||||
/* Words 13 14 15 are for PBDE support */
|
||||
|
||||
/* add the VMID tags as per switch response */
|
||||
if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
|
||||
if (phba->pport->vmid_priority_tagging) {
|
||||
@@ -1335,16 +1333,13 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
|
||||
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
|
||||
struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
|
||||
struct sli4_hybrid_sgl *sgl_xtra = NULL;
|
||||
struct scatterlist *data_sg;
|
||||
struct sli4_sge *first_data_sgl;
|
||||
struct ulp_bde64 *bde;
|
||||
dma_addr_t physaddr = 0;
|
||||
uint32_t dma_len = 0;
|
||||
uint32_t dma_offset = 0;
|
||||
int nseg, i, j;
|
||||
int nseg, i, j, k;
|
||||
bool lsp_just_set = false;
|
||||
|
||||
/* Fix up the command and response DMA stuff. */
|
||||
@@ -1361,7 +1356,6 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
*/
|
||||
sgl += 2;
|
||||
|
||||
first_data_sgl = sgl;
|
||||
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
|
||||
if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
@@ -1385,6 +1379,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
|
||||
/* for tracking the segment boundaries */
|
||||
j = 2;
|
||||
k = 5;
|
||||
if (unlikely(!phba->cfg_xpsgl))
|
||||
k = 1;
|
||||
for (i = 0; i < nseg; i++) {
|
||||
if (data_sg == NULL) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
@@ -1403,9 +1400,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
|
||||
/* expand the segment */
|
||||
if (!lsp_just_set &&
|
||||
!((j + 1) % phba->border_sge_num) &&
|
||||
((nseg - 1) != i)) {
|
||||
if (!lsp_just_set && (nseg != (i + k)) &&
|
||||
!((j + k) % phba->border_sge_num)) {
|
||||
/* set LSP type */
|
||||
bf_set(lpfc_sli4_sge_type, sgl,
|
||||
LPFC_SGE_TYPE_LSP);
|
||||
@@ -1428,8 +1424,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
}
|
||||
}
|
||||
|
||||
if (!(bf_get(lpfc_sli4_sge_type, sgl) &
|
||||
LPFC_SGE_TYPE_LSP)) {
|
||||
if (bf_get(lpfc_sli4_sge_type, sgl) !=
|
||||
LPFC_SGE_TYPE_LSP) {
|
||||
if ((nseg - 1) == i)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
|
||||
@@ -1450,40 +1446,26 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
sgl++;
|
||||
|
||||
lsp_just_set = false;
|
||||
j++;
|
||||
} else {
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
|
||||
sgl->sge_len = cpu_to_le32(
|
||||
phba->cfg_sg_dma_buf_size);
|
||||
/* will remaining SGEs fill the next SGL? */
|
||||
if ((nseg - i) < phba->border_sge_num)
|
||||
sgl->sge_len =
|
||||
cpu_to_le32((nseg - i) *
|
||||
sizeof(*sgl));
|
||||
else
|
||||
sgl->sge_len =
|
||||
cpu_to_le32(phba->cfg_sg_dma_buf_size);
|
||||
|
||||
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
|
||||
i = i - 1;
|
||||
|
||||
lsp_just_set = true;
|
||||
j += k;
|
||||
k = 1;
|
||||
}
|
||||
|
||||
j++;
|
||||
}
|
||||
|
||||
/* PBDE support for first data SGE only */
|
||||
if (nseg == 1 && phba->cfg_enable_pbde) {
|
||||
/* Words 13-15 */
|
||||
bde = (struct ulp_bde64 *)
|
||||
&wqe->words[13];
|
||||
bde->addrLow = first_data_sgl->addr_lo;
|
||||
bde->addrHigh = first_data_sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
le32_to_cpu(first_data_sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
|
||||
/* Word 11 - set PBDE bit */
|
||||
bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
|
||||
} else {
|
||||
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
|
||||
/* Word 11 - PBDE bit disabled by default template */
|
||||
}
|
||||
|
||||
} else {
|
||||
lpfc_ncmd->seg_cnt = 0;
|
||||
|
||||
@@ -2846,6 +2828,54 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_nvme_flush_abts_list - Clean up nvme commands from the abts list
|
||||
* @phba: Pointer to HBA context object.
|
||||
*
|
||||
**/
|
||||
void
|
||||
lpfc_nvme_flush_abts_list(struct lpfc_hba *phba)
|
||||
{
|
||||
#if (IS_ENABLED(CONFIG_NVME_FC))
|
||||
struct lpfc_io_buf *psb, *psb_next;
|
||||
struct lpfc_sli4_hdw_queue *qp;
|
||||
LIST_HEAD(aborts);
|
||||
int i;
|
||||
|
||||
/* abts_xxxx_buf_list_lock required because worker thread uses this
|
||||
* list.
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
qp = &phba->sli4_hba.hdwq[i];
|
||||
|
||||
spin_lock(&qp->abts_io_buf_list_lock);
|
||||
list_for_each_entry_safe(psb, psb_next,
|
||||
&qp->lpfc_abts_io_buf_list, list) {
|
||||
if (!(psb->cur_iocbq.cmd_flag & LPFC_IO_NVME))
|
||||
continue;
|
||||
list_move(&psb->list, &aborts);
|
||||
qp->abts_nvme_io_bufs--;
|
||||
}
|
||||
spin_unlock(&qp->abts_io_buf_list_lock);
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
list_for_each_entry_safe(psb, psb_next, &aborts, list) {
|
||||
list_del_init(&psb->list);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
||||
"6195 %s: lpfc_ncmd x%px flags x%x "
|
||||
"cmd_flag x%x xri x%x\n", __func__,
|
||||
psb, psb->flags,
|
||||
psb->cur_iocbq.cmd_flag,
|
||||
psb->cur_iocbq.sli4_xritag);
|
||||
psb->flags &= ~LPFC_SBUF_XBUSY;
|
||||
psb->status = IOSTAT_SUCCESS;
|
||||
lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port
|
||||
* @phba: Pointer to HBA context object.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -118,12 +118,9 @@ lpfc_nvmet_cmd_template(void)
|
||||
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
|
||||
bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
|
||||
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
|
||||
bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
|
||||
|
||||
/* Word 12 - fcp_data_len is variable */
|
||||
|
||||
/* Word 13, 14, 15 - PBDE is zero */
|
||||
|
||||
/* TRECEIVE template */
|
||||
wqe = &lpfc_treceive_cmd_template;
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
||||
@@ -158,18 +155,15 @@ lpfc_nvmet_cmd_template(void)
|
||||
bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
|
||||
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
|
||||
|
||||
/* Word 11 - pbde is variable */
|
||||
/* Word 11 */
|
||||
bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
|
||||
bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
|
||||
bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
|
||||
bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
|
||||
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
|
||||
|
||||
/* Word 12 - fcp_data_len is variable */
|
||||
|
||||
/* Word 13, 14, 15 - PBDE is variable */
|
||||
|
||||
/* TRSP template */
|
||||
wqe = &lpfc_trsp_cmd_template;
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
||||
@@ -207,7 +201,6 @@ lpfc_nvmet_cmd_template(void)
|
||||
bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
|
||||
bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
|
||||
bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
|
||||
bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
|
||||
|
||||
/* Word 12, 13, 14, 15 - is zero */
|
||||
}
|
||||
@@ -2722,7 +2715,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
||||
struct ulp_bde64 *bde;
|
||||
dma_addr_t physaddr;
|
||||
int i, cnt, nsegs;
|
||||
bool use_pbde = false;
|
||||
int xc = 1;
|
||||
|
||||
if (!lpfc_is_link_up(phba)) {
|
||||
@@ -2907,15 +2899,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
||||
if (!xc)
|
||||
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
|
||||
|
||||
/* Word 11 - check for pbde */
|
||||
if (nsegs == 1 && phba->cfg_enable_pbde) {
|
||||
use_pbde = true;
|
||||
/* Word 11 - PBDE bit already preset by template */
|
||||
} else {
|
||||
/* Overwrite default template setting */
|
||||
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
|
||||
}
|
||||
|
||||
/* Word 12 */
|
||||
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
|
||||
|
||||
@@ -3023,19 +3006,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
||||
}
|
||||
|
||||
bde = (struct ulp_bde64 *)&wqe->words[13];
|
||||
if (use_pbde) {
|
||||
/* decrement sgl ptr backwards once to first data sge */
|
||||
sgl--;
|
||||
|
||||
/* Words 13-15 (PBDE) */
|
||||
bde->addrLow = sgl->addr_lo;
|
||||
bde->addrHigh = sgl->addr_hi;
|
||||
bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
} else {
|
||||
memset(bde, 0, sizeof(struct ulp_bde64));
|
||||
}
|
||||
memset(bde, 0, sizeof(struct ulp_bde64));
|
||||
|
||||
ctxp->state = LPFC_NVME_STE_DATA;
|
||||
ctxp->entry_cnt++;
|
||||
return nvmewqe;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -1938,7 +1938,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
uint32_t dma_len;
|
||||
uint32_t dma_offset = 0;
|
||||
struct sli4_hybrid_sgl *sgl_xtra = NULL;
|
||||
int j;
|
||||
int j, k;
|
||||
bool lsp_just_set = false;
|
||||
|
||||
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
|
||||
@@ -2001,13 +2001,16 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
/* assumption: caller has already run dma_map_sg on command data */
|
||||
sgde = scsi_sglist(sc);
|
||||
j = 3;
|
||||
k = 5;
|
||||
if (unlikely(!phba->cfg_xpsgl))
|
||||
k = 1;
|
||||
for (i = 0; i < datasegcnt; i++) {
|
||||
/* clear it */
|
||||
sgl->word2 = 0;
|
||||
|
||||
/* do we need to expand the segment */
|
||||
if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
|
||||
((datasegcnt - 1) != i)) {
|
||||
/* do we need to expand the segment? */
|
||||
if (!lsp_just_set && (datasegcnt != (i + k)) &&
|
||||
!((j + k) % phba->border_sge_num)) {
|
||||
/* set LSP type */
|
||||
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
|
||||
|
||||
@@ -2026,7 +2029,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
|
||||
}
|
||||
|
||||
if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
|
||||
if (bf_get(lpfc_sli4_sge_type, sgl) != LPFC_SGE_TYPE_LSP) {
|
||||
if ((datasegcnt - 1) == i)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
physaddr = sg_dma_address(sgde);
|
||||
@@ -2043,20 +2046,23 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
sgl++;
|
||||
num_sge++;
|
||||
j++;
|
||||
lsp_just_set = false;
|
||||
|
||||
} else {
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
|
||||
|
||||
/* will remaining SGEs fill the next SGL? */
|
||||
if ((datasegcnt - i) < phba->border_sge_num)
|
||||
sgl->sge_len = cpu_to_le32((datasegcnt - i) *
|
||||
sizeof(*sgl));
|
||||
else
|
||||
sgl->sge_len =
|
||||
cpu_to_le32(phba->cfg_sg_dma_buf_size);
|
||||
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
|
||||
i = i - 1;
|
||||
|
||||
j += k;
|
||||
lsp_just_set = true;
|
||||
k = 1;
|
||||
}
|
||||
|
||||
j++;
|
||||
|
||||
}
|
||||
|
||||
out:
|
||||
@@ -2109,6 +2115,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
struct scatterlist *sgde = NULL; /* s/g data entry */
|
||||
struct scatterlist *sgpe = NULL; /* s/g prot entry */
|
||||
struct sli4_sge_diseed *diseed = NULL;
|
||||
struct sli4_sge_le *lsp_sgl = NULL;
|
||||
dma_addr_t dataphysaddr, protphysaddr;
|
||||
unsigned short curr_prot = 0;
|
||||
unsigned int split_offset;
|
||||
@@ -2125,8 +2132,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
uint32_t rc;
|
||||
#endif
|
||||
uint32_t checking = 1;
|
||||
uint32_t dma_offset = 0, num_sge = 0;
|
||||
int j = 2;
|
||||
uint32_t dma_offset = 0, num_sge = 0, lsp_len;
|
||||
int j = 2, k = 4;
|
||||
struct sli4_hybrid_sgl *sgl_xtra = NULL;
|
||||
|
||||
sgpe = scsi_prot_sglist(sc);
|
||||
@@ -2157,6 +2164,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
}
|
||||
#endif
|
||||
|
||||
if (unlikely(!phba->cfg_xpsgl))
|
||||
k = 0;
|
||||
split_offset = 0;
|
||||
do {
|
||||
/* Check to see if we ran out of space */
|
||||
@@ -2164,10 +2173,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
!(phba->cfg_xpsgl))
|
||||
return num_sge + 3;
|
||||
|
||||
/* DISEED and DIF have to be together */
|
||||
if (!((j + 1) % phba->border_sge_num) ||
|
||||
!((j + 2) % phba->border_sge_num) ||
|
||||
!((j + 3) % phba->border_sge_num)) {
|
||||
/* DISEED and DIF have to be together */
|
||||
if (!((j + k + 1) % phba->border_sge_num) ||
|
||||
!((j + k + 2) % phba->border_sge_num) ||
|
||||
!((j + k + 3) % phba->border_sge_num)) {
|
||||
sgl->word2 = 0;
|
||||
|
||||
/* set LSP type */
|
||||
@@ -2186,9 +2195,18 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
|
||||
if (lsp_sgl) {
|
||||
j++;
|
||||
if (j % phba->border_sge_num) {
|
||||
lsp_len = j * (sizeof(*sgl));
|
||||
lsp_sgl->sge_len = cpu_to_le32(lsp_len);
|
||||
}
|
||||
}
|
||||
lsp_sgl = (struct sli4_sge_le *)sgl;
|
||||
|
||||
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
|
||||
j = 0;
|
||||
k = 0;
|
||||
}
|
||||
|
||||
/* setup DISEED with what we have */
|
||||
@@ -2291,7 +2309,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!((j + 1) % phba->border_sge_num)) {
|
||||
if (!((j + k + 1) % phba->border_sge_num)) {
|
||||
sgl->word2 = 0;
|
||||
|
||||
/* set LSP type */
|
||||
@@ -2313,8 +2331,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(
|
||||
phba->cfg_sg_dma_buf_size);
|
||||
lsp_sgl = (struct sli4_sge_le *)sgl;
|
||||
|
||||
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
|
||||
j = 0;
|
||||
k = 0;
|
||||
} else {
|
||||
dataphysaddr = sg_dma_address(sgde) +
|
||||
split_offset;
|
||||
@@ -2362,11 +2383,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
/* Move to the next s/g segment if possible */
|
||||
sgde = sg_next(sgde);
|
||||
|
||||
sgl++;
|
||||
j++;
|
||||
}
|
||||
|
||||
j++;
|
||||
}
|
||||
|
||||
if (protgroup_offset) {
|
||||
@@ -2381,6 +2400,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
sgl--;
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
alldone = 1;
|
||||
|
||||
/* Reset length in previous LSP where necessary */
|
||||
if (lsp_sgl) {
|
||||
if (j % phba->border_sge_num) {
|
||||
lsp_len = j * (sizeof(*sgl));
|
||||
lsp_sgl->sge_len = cpu_to_le32(lsp_len);
|
||||
}
|
||||
}
|
||||
} else if (curr_prot < protcnt) {
|
||||
/* advance to next prot buffer */
|
||||
sgpe = sg_next(sgpe);
|
||||
@@ -2392,7 +2419,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"9085 BLKGRD: bug in %s\n", __func__);
|
||||
}
|
||||
|
||||
} while (!alldone);
|
||||
|
||||
out:
|
||||
@@ -3050,15 +3076,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
struct scatterlist *sgel = NULL;
|
||||
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
|
||||
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
|
||||
struct sli4_sge *first_data_sgl;
|
||||
struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
|
||||
struct lpfc_vport *vport = phba->pport;
|
||||
union lpfc_wqe128 *wqe = &pwqeq->wqe;
|
||||
dma_addr_t physaddr;
|
||||
uint32_t dma_len;
|
||||
uint32_t dma_offset = 0;
|
||||
int nseg, i, j;
|
||||
struct ulp_bde64 *bde;
|
||||
int nseg, i, j, k;
|
||||
bool lsp_just_set = false;
|
||||
struct sli4_hybrid_sgl *sgl_xtra = NULL;
|
||||
|
||||
@@ -3085,7 +3109,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl += 1;
|
||||
first_data_sgl = sgl;
|
||||
lpfc_cmd->seg_cnt = nseg;
|
||||
if (!phba->cfg_xpsgl &&
|
||||
lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
|
||||
@@ -3114,6 +3137,9 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
/* for tracking segment boundaries */
|
||||
sgel = scsi_sglist(scsi_cmnd);
|
||||
j = 2;
|
||||
k = 5;
|
||||
if (unlikely(!phba->cfg_xpsgl))
|
||||
k = 1;
|
||||
for (i = 0; i < nseg; i++) {
|
||||
sgl->word2 = 0;
|
||||
if (nseg == 1) {
|
||||
@@ -3124,9 +3150,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
|
||||
/* do we need to expand the segment */
|
||||
if (!lsp_just_set &&
|
||||
!((j + 1) % phba->border_sge_num) &&
|
||||
((nseg - 1) != i)) {
|
||||
if (!lsp_just_set && (nseg != (i + k)) &&
|
||||
!((j + k) % phba->border_sge_num)) {
|
||||
/* set LSP type */
|
||||
bf_set(lpfc_sli4_sge_type, sgl,
|
||||
LPFC_SGE_TYPE_LSP);
|
||||
@@ -3150,8 +3175,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
}
|
||||
}
|
||||
|
||||
if (!(bf_get(lpfc_sli4_sge_type, sgl) &
|
||||
LPFC_SGE_TYPE_LSP)) {
|
||||
if (bf_get(lpfc_sli4_sge_type, sgl) !=
|
||||
LPFC_SGE_TYPE_LSP) {
|
||||
if ((nseg - 1) == i)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
|
||||
@@ -3171,43 +3196,24 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
|
||||
sgl++;
|
||||
lsp_just_set = false;
|
||||
|
||||
j++;
|
||||
} else {
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(
|
||||
phba->cfg_sg_dma_buf_size);
|
||||
|
||||
/* will remaining SGEs fill the next SGL? */
|
||||
if ((nseg - i) < phba->border_sge_num)
|
||||
sgl->sge_len =
|
||||
cpu_to_le32((nseg - i) *
|
||||
sizeof(*sgl));
|
||||
else
|
||||
sgl->sge_len =
|
||||
cpu_to_le32(phba->cfg_sg_dma_buf_size);
|
||||
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
|
||||
i = i - 1;
|
||||
|
||||
lsp_just_set = true;
|
||||
j += k;
|
||||
k = 1;
|
||||
}
|
||||
|
||||
j++;
|
||||
}
|
||||
|
||||
/* PBDE support for first data SGE only.
|
||||
* For FCoE, we key off Performance Hints.
|
||||
* For FC, we key off lpfc_enable_pbde.
|
||||
*/
|
||||
if (nseg == 1 &&
|
||||
((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
|
||||
phba->cfg_enable_pbde)) {
|
||||
/* Words 13-15 */
|
||||
bde = (struct ulp_bde64 *)
|
||||
&wqe->words[13];
|
||||
bde->addrLow = first_data_sgl->addr_lo;
|
||||
bde->addrHigh = first_data_sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
le32_to_cpu(first_data_sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
|
||||
/* Word 11 - set PBDE bit */
|
||||
bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
|
||||
} else {
|
||||
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
|
||||
/* Word 11 - PBDE bit disabled by default template */
|
||||
}
|
||||
} else {
|
||||
sgl += 1;
|
||||
@@ -3215,13 +3221,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
|
||||
sgl->word2 = le32_to_cpu(sgl->word2);
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
|
||||
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
|
||||
phba->cfg_enable_pbde) {
|
||||
bde = (struct ulp_bde64 *)
|
||||
&wqe->words[13];
|
||||
memset(bde, 0, (sizeof(uint32_t) * 3));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -4665,7 +4664,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
|
||||
else
|
||||
piocbq->iocb.ulpFCP2Rcvy = 0;
|
||||
|
||||
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
|
||||
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK);
|
||||
piocbq->io_buf = lpfc_cmd;
|
||||
if (!piocbq->cmd_cmpl)
|
||||
piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
|
||||
@@ -4777,7 +4776,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
|
||||
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
|
||||
|
||||
bf_set(wqe_class, &wqe->generic.wqe_com,
|
||||
(pnode->nlp_fcp_info & 0x0f));
|
||||
(pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK));
|
||||
|
||||
/* Word 8 */
|
||||
wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
|
||||
@@ -4877,7 +4876,7 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
|
||||
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
|
||||
piocb->ulpContext = ndlp->nlp_rpi;
|
||||
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
|
||||
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
|
||||
piocb->ulpClass = (ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK);
|
||||
piocb->ulpPU = 0;
|
||||
piocb->un.fcpi.fcpi_parm = 0;
|
||||
|
||||
@@ -4945,7 +4944,7 @@ lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
|
||||
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
|
||||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
|
||||
bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
|
||||
(ndlp->nlp_fcp_info & 0x0f));
|
||||
(ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK));
|
||||
|
||||
/* ulpTimeout is only one byte */
|
||||
if (lpfc_cmd->timeout > 0xff) {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -136,15 +136,12 @@ void lpfc_wqe_cmd_template(void)
|
||||
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
|
||||
bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
|
||||
|
||||
/* Word 11 - pbde is variable */
|
||||
/* Word 11 */
|
||||
bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
|
||||
bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
|
||||
|
||||
/* Word 12 - is zero */
|
||||
|
||||
/* Word 13, 14, 15 - PBDE is variable */
|
||||
|
||||
/* IWRITE template */
|
||||
wqe = &lpfc_iwrite_cmd_template;
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
||||
@@ -176,15 +173,12 @@ void lpfc_wqe_cmd_template(void)
|
||||
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
|
||||
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
|
||||
|
||||
/* Word 11 - pbde is variable */
|
||||
/* Word 11 */
|
||||
bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
|
||||
bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
|
||||
|
||||
/* Word 12 - is zero */
|
||||
|
||||
/* Word 13, 14, 15 - PBDE is variable */
|
||||
|
||||
/* ICMND template */
|
||||
wqe = &lpfc_icmnd_cmd_template;
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
||||
@@ -217,7 +211,6 @@ void lpfc_wqe_cmd_template(void)
|
||||
/* Word 11 */
|
||||
bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
|
||||
bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
|
||||
|
||||
/* Word 12, 13, 14, 15 - is zero */
|
||||
}
|
||||
@@ -4572,59 +4565,41 @@ void
|
||||
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
|
||||
{
|
||||
LIST_HEAD(tx_completions);
|
||||
LIST_HEAD(txcmplq_completions);
|
||||
spinlock_t *plock; /* for transmit queue access */
|
||||
struct lpfc_iocbq *iocb, *next_iocb;
|
||||
int offline;
|
||||
|
||||
if (pring->ringno == LPFC_ELS_RING) {
|
||||
if (phba->sli_rev >= LPFC_SLI_REV4)
|
||||
plock = &pring->ring_lock;
|
||||
else
|
||||
plock = &phba->hbalock;
|
||||
|
||||
if (pring->ringno == LPFC_ELS_RING)
|
||||
lpfc_fabric_abort_hba(phba);
|
||||
}
|
||||
|
||||
offline = pci_channel_offline(phba->pcidev);
|
||||
|
||||
/* Error everything on txq and txcmplq
|
||||
* First do the txq.
|
||||
*/
|
||||
if (phba->sli_rev >= LPFC_SLI_REV4) {
|
||||
spin_lock_irq(&pring->ring_lock);
|
||||
list_splice_init(&pring->txq, &tx_completions);
|
||||
pring->txq_cnt = 0;
|
||||
|
||||
if (offline) {
|
||||
list_splice_init(&pring->txcmplq,
|
||||
&txcmplq_completions);
|
||||
} else {
|
||||
/* Next issue ABTS for everything on the txcmplq */
|
||||
list_for_each_entry_safe(iocb, next_iocb,
|
||||
&pring->txcmplq, list)
|
||||
lpfc_sli_issue_abort_iotag(phba, pring,
|
||||
iocb, NULL);
|
||||
}
|
||||
spin_unlock_irq(&pring->ring_lock);
|
||||
} else {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
list_splice_init(&pring->txq, &tx_completions);
|
||||
pring->txq_cnt = 0;
|
||||
|
||||
if (offline) {
|
||||
list_splice_init(&pring->txcmplq, &txcmplq_completions);
|
||||
} else {
|
||||
/* Next issue ABTS for everything on the txcmplq */
|
||||
list_for_each_entry_safe(iocb, next_iocb,
|
||||
&pring->txcmplq, list)
|
||||
lpfc_sli_issue_abort_iotag(phba, pring,
|
||||
iocb, NULL);
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
/* Cancel everything on txq */
|
||||
spin_lock_irq(plock);
|
||||
list_splice_init(&pring->txq, &tx_completions);
|
||||
pring->txq_cnt = 0;
|
||||
|
||||
if (offline) {
|
||||
/* Cancel all the IOCBs from the completions list */
|
||||
lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
|
||||
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
|
||||
/* Cancel everything on txcmplq */
|
||||
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
|
||||
iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
|
||||
list_splice_init(&pring->txcmplq, &tx_completions);
|
||||
pring->txcmplq_cnt = 0;
|
||||
} else {
|
||||
/* Make sure HBA is alive */
|
||||
lpfc_issue_hb_tmo(phba);
|
||||
/* Issue ABTS for everything on the txcmplq */
|
||||
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
|
||||
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
|
||||
}
|
||||
spin_unlock_irq(plock);
|
||||
|
||||
if (!offline)
|
||||
lpfc_issue_hb_tmo(phba);
|
||||
|
||||
/* Cancel all the IOCBs from the completions list */
|
||||
lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
|
||||
IOERR_SLI_ABORTED);
|
||||
@@ -8750,14 +8725,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
||||
ftr_rsp++;
|
||||
}
|
||||
|
||||
/* Performance Hints are ONLY for FCoE */
|
||||
if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
|
||||
if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
|
||||
phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
|
||||
else
|
||||
phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the port cannot support the host's requested features
|
||||
* then turn off the global config parameters to disable the
|
||||
@@ -14355,13 +14322,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
|
||||
/* Get the reference to the active mbox command */
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
pmb = phba->sli.mbox_active;
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
if (unlikely(!pmb)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"1832 No pending MBOX command to handle\n");
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
"1832 No pending MBOX command to handle, "
|
||||
"mcqe: x%08x x%08x x%08x x%08x\n",
|
||||
mcqe->word0, mcqe->mcqe_tag0,
|
||||
mcqe->mcqe_tag1, mcqe->trailer);
|
||||
goto out_no_mqe_complete;
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
mqe = &pmb->u.mqe;
|
||||
pmbox = (MAILBOX_t *)&pmb->u.mqe;
|
||||
mbox = phba->mbox;
|
||||
@@ -14736,11 +14705,22 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
|
||||
atomic_read(&tgtp->rcv_fcp_cmd_out),
|
||||
atomic_read(&tgtp->xmt_fcp_release));
|
||||
}
|
||||
hrq->RQ_discard_frm++;
|
||||
fallthrough;
|
||||
|
||||
case FC_STATUS_INSUFF_BUF_NEED_BUF:
|
||||
/* Unexpected event - bump the counter for support. */
|
||||
hrq->RQ_no_posted_buf++;
|
||||
/* Post more buffers if possible */
|
||||
|
||||
lpfc_log_msg(phba, KERN_WARNING,
|
||||
LOG_ELS | LOG_DISCOVERY | LOG_SLI,
|
||||
"6423 RQE completion Status x%x, needed x%x "
|
||||
"discarded x%x\n", status,
|
||||
hrq->RQ_no_posted_buf - hrq->RQ_discard_frm,
|
||||
hrq->RQ_discard_frm);
|
||||
|
||||
/* For SLI3, post more buffers if possible. No action for SLI4.
|
||||
* SLI4 is reposting immediately after processing the RQE.
|
||||
*/
|
||||
set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
|
||||
workposted = true;
|
||||
break;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -246,6 +246,8 @@ struct lpfc_queue {
|
||||
uint32_t q_cnt_2;
|
||||
uint32_t q_cnt_3;
|
||||
uint64_t q_cnt_4;
|
||||
uint32_t q_cnt_5;
|
||||
|
||||
/* defines for EQ stats */
|
||||
#define EQ_max_eqe q_cnt_1
|
||||
#define EQ_no_entry q_cnt_2
|
||||
@@ -268,6 +270,7 @@ struct lpfc_queue {
|
||||
#define RQ_no_buf_found q_cnt_2
|
||||
#define RQ_buf_posted q_cnt_3
|
||||
#define RQ_rcv_buf q_cnt_4
|
||||
#define RQ_discard_frm q_cnt_5
|
||||
|
||||
struct work_struct irqwork;
|
||||
struct work_struct spwork;
|
||||
@@ -838,6 +841,7 @@ struct lpfc_sli4_hba {
|
||||
uint32_t ue_to_sr;
|
||||
uint32_t ue_to_rp;
|
||||
struct lpfc_register sli_intf;
|
||||
struct lpfc_register asic_id;
|
||||
struct lpfc_pc_sli4_params pc_sli4_params;
|
||||
struct lpfc_bbscn_params bbscn_params;
|
||||
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@@ -20,7 +20,7 @@
|
||||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "14.4.0.13"
|
||||
#define LPFC_DRIVER_VERSION "15.0.0.0"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
|
||||
/* Used for SLI 2/3 */
|
||||
@@ -32,6 +32,6 @@
|
||||
|
||||
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
|
||||
LPFC_DRIVER_VERSION
|
||||
#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
|
||||
#define LPFC_COPYRIGHT "Copyright (C) 2017-2026 Broadcom. All Rights " \
|
||||
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
|
||||
"and/or its subsidiaries."
|
||||
|
||||
@@ -6365,11 +6365,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
|
||||
megasas_setup_jbod_map(instance);
|
||||
|
||||
if (megasas_get_device_list(instance) != SUCCESS) {
|
||||
dev_err(&instance->pdev->dev,
|
||||
"%s: megasas_get_device_list failed\n",
|
||||
__func__);
|
||||
goto fail_get_ld_pd_list;
|
||||
scoped_guard(mutex, &instance->reset_mutex) {
|
||||
if (megasas_get_device_list(instance) != SUCCESS) {
|
||||
dev_err(&instance->pdev->dev,
|
||||
"%s: megasas_get_device_list failed\n",
|
||||
__func__);
|
||||
goto fail_get_ld_pd_list;
|
||||
}
|
||||
}
|
||||
|
||||
/* stream detection initialization */
|
||||
@@ -6468,7 +6470,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
}
|
||||
|
||||
if (instance->snapdump_wait_time) {
|
||||
megasas_get_snapdump_properties(instance);
|
||||
scoped_guard(mutex, &instance->reset_mutex)
|
||||
megasas_get_snapdump_properties(instance);
|
||||
dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
|
||||
instance->snapdump_wait_time);
|
||||
}
|
||||
|
||||
@@ -159,6 +159,7 @@ extern atomic64_t event_counter;
|
||||
/* Controller Reset related definitions */
|
||||
#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5
|
||||
#define MPI3MR_MAX_RESET_RETRY_COUNT 3
|
||||
#define MPI3MR_MAX_SHUTDOWN_RETRY_COUNT 2
|
||||
|
||||
/* ResponseCode definitions */
|
||||
#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
|
||||
@@ -323,6 +324,7 @@ enum mpi3mr_reset_reason {
|
||||
MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
|
||||
MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
|
||||
MPI3MR_RESET_FROM_TRIGGER = 31,
|
||||
MPI3MR_RESET_FROM_INVALID_COMPLETION = 32,
|
||||
};
|
||||
|
||||
#define MPI3MR_RESET_REASON_OSTYPE_LINUX 1
|
||||
@@ -428,6 +430,14 @@ struct segments {
|
||||
* @q_segments: Segment descriptor pointer
|
||||
* @q_segment_list: Segment list base virtual address
|
||||
* @q_segment_list_dma: Segment list base DMA address
|
||||
* @last_full_host_tag: Hosttag of last IO returned to SML
|
||||
* due to queue full
|
||||
* @qfull_io_count: Number of IOs returned back to SML
|
||||
* due to queue full
|
||||
* @qfull_instances: Total queue full occurrences.One occurrence
|
||||
* starts with queue full detection and ends
|
||||
* with queue full breaks.
|
||||
*
|
||||
*/
|
||||
struct op_req_qinfo {
|
||||
u16 ci;
|
||||
@@ -441,6 +451,10 @@ struct op_req_qinfo {
|
||||
struct segments *q_segments;
|
||||
void *q_segment_list;
|
||||
dma_addr_t q_segment_list_dma;
|
||||
u16 last_full_host_tag;
|
||||
u64 qfull_io_count;
|
||||
u32 qfull_instances;
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1183,6 +1197,7 @@ struct scmd_priv {
|
||||
* @num_tb_segs: Number of Segments in Trace buffer
|
||||
* @trace_buf_pool: DMA pool for Segmented trace buffer segments
|
||||
* @trace_buf: Trace buffer segments memory descriptor
|
||||
* @invalid_io_comp: Invalid IO completion
|
||||
*/
|
||||
struct mpi3mr_ioc {
|
||||
struct list_head list;
|
||||
@@ -1394,6 +1409,7 @@ struct mpi3mr_ioc {
|
||||
u32 num_tb_segs;
|
||||
struct dma_pool *trace_buf_pool;
|
||||
struct segments *trace_buf;
|
||||
u8 invalid_io_comp;
|
||||
|
||||
};
|
||||
|
||||
|
||||
@@ -996,6 +996,7 @@ static const struct {
|
||||
{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
|
||||
{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
|
||||
{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
|
||||
{ MPI3MR_RESET_FROM_INVALID_COMPLETION, "invalid cmd completion" },
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -2371,6 +2372,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
|
||||
op_req_q->ci = 0;
|
||||
op_req_q->pi = 0;
|
||||
op_req_q->reply_qid = reply_qid;
|
||||
op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID;
|
||||
op_req_q->qfull_io_count = 0;
|
||||
op_req_q->qfull_instances = 0;
|
||||
spin_lock_init(&op_req_q->q_lock);
|
||||
|
||||
if (!op_req_q->q_segments) {
|
||||
@@ -2557,6 +2561,8 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
|
||||
u16 req_sz = mrioc->facts.op_req_sz;
|
||||
struct segments *segments = op_req_q->q_segments;
|
||||
struct op_reply_qinfo *op_reply_q = NULL;
|
||||
struct mpi3_scsi_io_request *scsiio_req =
|
||||
(struct mpi3_scsi_io_request *)req;
|
||||
|
||||
reply_qidx = op_req_q->reply_qid - 1;
|
||||
op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
|
||||
@@ -2574,11 +2580,21 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
|
||||
mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
|
||||
|
||||
if (mpi3mr_check_req_qfull(op_req_q)) {
|
||||
|
||||
if (op_req_q->last_full_host_tag ==
|
||||
MPI3MR_HOSTTAG_INVALID)
|
||||
op_req_q->qfull_instances++;
|
||||
|
||||
op_req_q->last_full_host_tag = scsiio_req->host_tag;
|
||||
op_req_q->qfull_io_count++;
|
||||
retval = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (op_req_q->last_full_host_tag != MPI3MR_HOSTTAG_INVALID)
|
||||
op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID;
|
||||
|
||||
if (mrioc->reset_in_progress) {
|
||||
ioc_err(mrioc, "OpReqQ submit reset in progress\n");
|
||||
retval = -EAGAIN;
|
||||
@@ -2699,7 +2715,7 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
|
||||
* mpi3mr_sync_timestamp - Issue time stamp sync request
|
||||
* @mrioc: Adapter reference
|
||||
*
|
||||
* Issue IO unit control MPI request to synchornize firmware
|
||||
* Issue IO unit control MPI request to synchronize firmware
|
||||
* timestamp with host time.
|
||||
*
|
||||
* Return: 0 on success, non-zero on failure.
|
||||
@@ -2889,6 +2905,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
|
||||
return;
|
||||
}
|
||||
|
||||
if (mrioc->invalid_io_comp) {
|
||||
mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_INVALID_COMPLETION, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (atomic_read(&mrioc->admin_pend_isr)) {
|
||||
ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
|
||||
"flush admin replies\n");
|
||||
@@ -4834,6 +4855,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
|
||||
mrioc->req_qinfo[i].qid = 0;
|
||||
mrioc->req_qinfo[i].reply_qid = 0;
|
||||
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
|
||||
mrioc->req_qinfo[i].last_full_host_tag = 0;
|
||||
mpi3mr_memset_op_req_q_buffers(mrioc, i);
|
||||
}
|
||||
}
|
||||
@@ -5050,9 +5072,9 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
|
||||
*/
|
||||
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
|
||||
{
|
||||
u32 ioc_config, ioc_status;
|
||||
u8 retval = 1;
|
||||
u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
|
||||
u32 ioc_config, ioc_status, shutdown_action;
|
||||
u8 retval = 1, retry = 0;
|
||||
u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10, timeout_remaining = 0;
|
||||
|
||||
ioc_info(mrioc, "Issuing shutdown Notification\n");
|
||||
if (mrioc->unrecoverable) {
|
||||
@@ -5067,14 +5089,16 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
|
||||
return;
|
||||
}
|
||||
|
||||
shutdown_action = MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL |
|
||||
MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
|
||||
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
|
||||
ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
|
||||
ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
|
||||
ioc_config |= shutdown_action;
|
||||
|
||||
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
|
||||
|
||||
if (mrioc->facts.shutdown_timeout)
|
||||
timeout = mrioc->facts.shutdown_timeout * 10;
|
||||
timeout_remaining = timeout;
|
||||
|
||||
do {
|
||||
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
|
||||
@@ -5083,8 +5107,26 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
|
||||
retval = 0;
|
||||
break;
|
||||
}
|
||||
if (mrioc->unrecoverable)
|
||||
break;
|
||||
if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
|
||||
mpi3mr_print_fault_info(mrioc);
|
||||
if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
|
||||
break;
|
||||
if (mpi3mr_issue_reset(mrioc,
|
||||
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
|
||||
MPI3MR_RESET_FROM_CTLR_CLEANUP))
|
||||
break;
|
||||
ioc_config =
|
||||
readl(&mrioc->sysif_regs->ioc_configuration);
|
||||
ioc_config |= shutdown_action;
|
||||
writel(ioc_config,
|
||||
&mrioc->sysif_regs->ioc_configuration);
|
||||
timeout_remaining = timeout;
|
||||
retry++;
|
||||
}
|
||||
msleep(100);
|
||||
} while (--timeout);
|
||||
} while (--timeout_remaining);
|
||||
|
||||
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
|
||||
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
|
||||
@@ -5658,6 +5700,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
|
||||
ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
|
||||
|
||||
out:
|
||||
mrioc->invalid_io_comp = 0;
|
||||
if (!retval) {
|
||||
mrioc->diagsave_timeout = 0;
|
||||
mrioc->reset_in_progress = 0;
|
||||
|
||||
@@ -3459,8 +3459,15 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
|
||||
}
|
||||
scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
|
||||
if (!scmd) {
|
||||
panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
|
||||
mrioc->name, host_tag);
|
||||
ioc_err(mrioc, "Cannot Identify scmd for host_tag 0x%x", host_tag);
|
||||
ioc_err(mrioc,
|
||||
"reply_desc_type(%d) host_tag(%d(0x%04x)): qid(%d): command issued to\n"
|
||||
"handle(0x%04x) returned with ioc_status(0x%04x), log_info(0x%08x),\n"
|
||||
"scsi_state(0x%02x), scsi_status(0x%02x), xfer_count(%d), resp_data(0x%08x)\n",
|
||||
reply_desc_type, host_tag, host_tag, qidx+1, dev_handle, ioc_status,
|
||||
ioc_loginfo, scsi_state, scsi_status, xfer_count,
|
||||
resp_data);
|
||||
mrioc->invalid_io_comp = 1;
|
||||
goto out;
|
||||
}
|
||||
priv = scsi_cmd_priv(scmd);
|
||||
|
||||
@@ -1638,7 +1638,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
|
||||
int rval = QLA_FUNCTION_FAILED;
|
||||
uint16_t state[6];
|
||||
uint16_t state[16];
|
||||
uint32_t pstate;
|
||||
|
||||
if (IS_QLAFX00(vha->hw)) {
|
||||
@@ -2402,6 +2402,63 @@ qla2x00_dport_diagnostics_show(struct device *dev,
|
||||
vha->dport_data[0], vha->dport_data[1],
|
||||
vha->dport_data[2], vha->dport_data[3]);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla2x00_mpi_fw_state_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
|
||||
int rval = QLA_FUNCTION_FAILED;
|
||||
u16 state[16];
|
||||
u16 mpi_state;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (!(IS_QLA27XX(ha) || IS_QLA28XX(ha)))
|
||||
return scnprintf(buf, PAGE_SIZE,
|
||||
"MPI state reporting is not supported for this HBA.\n");
|
||||
|
||||
memset(state, 0, sizeof(state));
|
||||
|
||||
mutex_lock(&vha->hw->optrom_mutex);
|
||||
if (qla2x00_chip_is_down(vha)) {
|
||||
mutex_unlock(&vha->hw->optrom_mutex);
|
||||
ql_dbg(ql_dbg_user, vha, 0x70df,
|
||||
"ISP reset is in progress, failing mpi_fw_state.\n");
|
||||
return -EBUSY;
|
||||
} else if (vha->hw->flags.eeh_busy) {
|
||||
mutex_unlock(&vha->hw->optrom_mutex);
|
||||
ql_dbg(ql_dbg_user, vha, 0x70ea,
|
||||
"HBA in PCI error state, failing mpi_fw_state.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
rval = qla2x00_get_firmware_state(vha, state);
|
||||
mutex_unlock(&vha->hw->optrom_mutex);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_dbg(ql_dbg_user, vha, 0x70eb,
|
||||
"MB Command to retrieve MPI state failed (%d), failing mpi_fw_state.\n",
|
||||
rval);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
mpi_state = state[11];
|
||||
|
||||
if (!(mpi_state & BIT_15))
|
||||
return scnprintf(buf, PAGE_SIZE,
|
||||
"MPI firmware state reporting is not supported by this firmware. (0x%02x)\n",
|
||||
mpi_state);
|
||||
|
||||
if (!(mpi_state & BIT_8))
|
||||
return scnprintf(buf, PAGE_SIZE,
|
||||
"MPI firmware is disabled. (0x%02x)\n",
|
||||
mpi_state);
|
||||
|
||||
return scnprintf(buf, PAGE_SIZE,
|
||||
"MPI firmware is enabled, state is %s. (0x%02x)\n",
|
||||
mpi_state & BIT_9 ? "active" : "inactive",
|
||||
mpi_state);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(dport_diagnostics, 0444,
|
||||
qla2x00_dport_diagnostics_show, NULL);
|
||||
|
||||
@@ -2469,6 +2526,8 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
|
||||
qla2x00_port_speed_store);
|
||||
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
|
||||
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
|
||||
static DEVICE_ATTR(mpi_fw_state, 0444, qla2x00_mpi_fw_state_show, NULL);
|
||||
|
||||
|
||||
static struct attribute *qla2x00_host_attrs[] = {
|
||||
&dev_attr_driver_version.attr.attr,
|
||||
@@ -2517,6 +2576,7 @@ static struct attribute *qla2x00_host_attrs[] = {
|
||||
&dev_attr_qlini_mode.attr,
|
||||
&dev_attr_ql2xiniexchg.attr,
|
||||
&dev_attr_ql2xexchoffld.attr,
|
||||
&dev_attr_mpi_fw_state.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
||||
@@ -4914,7 +4914,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
|
||||
unsigned long wtime, mtime, cs84xx_time;
|
||||
uint16_t min_wait; /* Minimum wait time if loop is down */
|
||||
uint16_t wait_time; /* Wait time if loop is coming ready */
|
||||
uint16_t state[6];
|
||||
uint16_t state[16];
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (IS_QLAFX00(vha->hw))
|
||||
|
||||
@@ -621,7 +621,7 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
if (!ha->qp_cpu_map) {
|
||||
ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS);
|
||||
ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, nr_cpu_ids);
|
||||
if (!ha->qp_cpu_map) {
|
||||
ql_log(ql_log_fatal, vha, 0x0180,
|
||||
"Unable to allocate memory for qp_cpu_map ptrs.\n");
|
||||
|
||||
@@ -2268,6 +2268,13 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
|
||||
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
else
|
||||
mcp->in_mb = MBX_1|MBX_0;
|
||||
|
||||
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
|
||||
mcp->mb[12] = 0;
|
||||
mcp->out_mb |= MBX_12;
|
||||
mcp->in_mb |= MBX_12;
|
||||
}
|
||||
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
@@ -2280,6 +2287,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
|
||||
states[3] = mcp->mb[4];
|
||||
states[4] = mcp->mb[5];
|
||||
states[5] = mcp->mb[6]; /* DPORT status */
|
||||
if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
|
||||
states[11] = mcp->mb[12]; /* MPI state. */
|
||||
}
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
|
||||
@@ -1841,6 +1841,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
|
||||
.tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
|
||||
.tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
@@ -1881,6 +1882,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
|
||||
|
||||
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
@@ -3460,6 +3461,52 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_vpd_lun_id);
|
||||
|
||||
/**
|
||||
* scsi_vpd_lun_serial - return a unique device serial number
|
||||
* @sdev: SCSI device
|
||||
* @sn: buffer for the serial number
|
||||
* @sn_size: size of the buffer
|
||||
*
|
||||
* Copies the device serial number into @sn based on the information in
|
||||
* the VPD page 0x80 of the device. The string will be null terminated
|
||||
* and have leading and trailing whitespace stripped.
|
||||
*
|
||||
* Returns the length of the serial number or error on failure.
|
||||
*/
|
||||
int scsi_vpd_lun_serial(struct scsi_device *sdev, char *sn, size_t sn_size)
|
||||
{
|
||||
const struct scsi_vpd *vpd_pg80;
|
||||
const unsigned char *d;
|
||||
int len;
|
||||
|
||||
guard(rcu)();
|
||||
vpd_pg80 = rcu_dereference(sdev->vpd_pg80);
|
||||
if (!vpd_pg80)
|
||||
return -ENXIO;
|
||||
|
||||
len = vpd_pg80->len - 4;
|
||||
d = vpd_pg80->data + 4;
|
||||
|
||||
/* Skip leading spaces */
|
||||
while (len > 0 && isspace(*d)) {
|
||||
len--;
|
||||
d++;
|
||||
}
|
||||
|
||||
/* Skip trailing spaces */
|
||||
while (len > 0 && isspace(d[len - 1]))
|
||||
len--;
|
||||
|
||||
if (sn_size < len + 1)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(sn, d, len);
|
||||
sn[len] = '\0';
|
||||
|
||||
return len;
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_vpd_lun_serial);
|
||||
|
||||
/**
|
||||
* scsi_vpd_tpg_id - return a target port group identifier
|
||||
* @sdev: SCSI device
|
||||
|
||||
@@ -1940,7 +1940,6 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
|
||||
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
|
||||
{
|
||||
struct async_scan_data *data = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
if (strncmp(scsi_scan_type, "sync", 4) == 0)
|
||||
return NULL;
|
||||
@@ -1959,9 +1958,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
|
||||
goto err;
|
||||
init_completion(&data->prev_finished);
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
shost->async_scan = 1;
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
shost->async_scan = true;
|
||||
mutex_unlock(&shost->scan_mutex);
|
||||
|
||||
spin_lock(&async_scan_lock);
|
||||
@@ -1989,7 +1986,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
|
||||
static void scsi_finish_async_scan(struct async_scan_data *data)
|
||||
{
|
||||
struct Scsi_Host *shost;
|
||||
unsigned long flags;
|
||||
|
||||
if (!data)
|
||||
return;
|
||||
@@ -2009,9 +2005,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
|
||||
|
||||
scsi_sysfs_add_devices(shost);
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
shost->async_scan = 0;
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
shost->async_scan = false;
|
||||
|
||||
mutex_unlock(&shost->scan_mutex);
|
||||
|
||||
|
||||
@@ -1051,6 +1051,21 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
|
||||
}
|
||||
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
|
||||
|
||||
static ssize_t
|
||||
sdev_show_serial(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct scsi_device *sdev = to_scsi_device(dev);
|
||||
ssize_t ret;
|
||||
|
||||
ret = scsi_vpd_lun_serial(sdev, buf, PAGE_SIZE - 1);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
buf[ret] = '\n';
|
||||
return ret + 1;
|
||||
}
|
||||
static DEVICE_ATTR(serial, S_IRUGO, sdev_show_serial, NULL);
|
||||
|
||||
#define BLIST_FLAG_NAME(name) \
|
||||
[const_ilog2((__force __u64)BLIST_##name)] = #name
|
||||
static const char *const sdev_bflags_name[] = {
|
||||
@@ -1295,6 +1310,7 @@ static struct attribute *scsi_sdev_attrs[] = {
|
||||
&dev_attr_device_busy.attr,
|
||||
&dev_attr_vendor.attr,
|
||||
&dev_attr_model.attr,
|
||||
&dev_attr_serial.attr,
|
||||
&dev_attr_rev.attr,
|
||||
&dev_attr_rescan.attr,
|
||||
&dev_attr_delete.attr,
|
||||
|
||||
@@ -107,8 +107,11 @@ static void sd_config_write_same(struct scsi_disk *sdkp,
|
||||
static void sd_revalidate_disk(struct gendisk *);
|
||||
|
||||
static DEFINE_IDA(sd_index_ida);
|
||||
static DEFINE_MUTEX(sd_mutex_lock);
|
||||
|
||||
static mempool_t *sd_page_pool;
|
||||
static mempool_t *sd_large_page_pool;
|
||||
static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0);
|
||||
static struct lock_class_key sd_bio_compl_lkclass;
|
||||
|
||||
static const char *sd_cache_types[] = {
|
||||
@@ -116,6 +119,33 @@ static const char *sd_cache_types[] = {
|
||||
"write back, no read (daft)"
|
||||
};
|
||||
|
||||
static int sd_large_pool_create(void)
|
||||
{
|
||||
mutex_lock(&sd_mutex_lock);
|
||||
if (!sd_large_page_pool) {
|
||||
sd_large_page_pool = mempool_create_page_pool(
|
||||
SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE));
|
||||
if (!sd_large_page_pool) {
|
||||
printk(KERN_ERR "sd: can't create large page mempool\n");
|
||||
mutex_unlock(&sd_mutex_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
atomic_inc(&sd_large_page_pool_users);
|
||||
mutex_unlock(&sd_mutex_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sd_large_pool_destroy(void)
|
||||
{
|
||||
mutex_lock(&sd_mutex_lock);
|
||||
if (atomic_dec_and_test(&sd_large_page_pool_users)) {
|
||||
mempool_destroy(sd_large_page_pool);
|
||||
sd_large_page_pool = NULL;
|
||||
}
|
||||
mutex_unlock(&sd_mutex_lock);
|
||||
}
|
||||
|
||||
static void sd_disable_discard(struct scsi_disk *sdkp)
|
||||
{
|
||||
sdkp->provisioning_mode = SD_LBP_DISABLE;
|
||||
@@ -928,14 +958,24 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
|
||||
return protect;
|
||||
}
|
||||
|
||||
static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
|
||||
static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len)
|
||||
{
|
||||
struct page *page;
|
||||
struct request *rq = scsi_cmd_to_rq(cmd);
|
||||
struct scsi_device *sdp = cmd->device;
|
||||
unsigned sector_size = sdp->sector_size;
|
||||
unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE);
|
||||
int n;
|
||||
|
||||
page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
|
||||
if (sector_size > PAGE_SIZE)
|
||||
page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC);
|
||||
else
|
||||
page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
|
||||
if (!page)
|
||||
return NULL;
|
||||
clear_highpage(page);
|
||||
|
||||
for (n = 0; n < nr_pages; n++)
|
||||
clear_highpage(page + n);
|
||||
bvec_set_page(&rq->special_vec, page, data_len, 0);
|
||||
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
|
||||
return bvec_virt(&rq->special_vec);
|
||||
@@ -951,7 +991,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
|
||||
unsigned int data_len = 24;
|
||||
char *buf;
|
||||
|
||||
buf = sd_set_special_bvec(rq, data_len);
|
||||
buf = sd_set_special_bvec(cmd, data_len);
|
||||
if (!buf)
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
@@ -1040,7 +1080,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
|
||||
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
|
||||
u32 data_len = sdp->sector_size;
|
||||
|
||||
if (!sd_set_special_bvec(rq, data_len))
|
||||
if (!sd_set_special_bvec(cmd, data_len))
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
cmd->cmd_len = 16;
|
||||
@@ -1067,7 +1107,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
|
||||
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
|
||||
u32 data_len = sdp->sector_size;
|
||||
|
||||
if (!sd_set_special_bvec(rq, data_len))
|
||||
if (!sd_set_special_bvec(cmd, data_len))
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
cmd->cmd_len = 10;
|
||||
@@ -1513,9 +1553,15 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
|
||||
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
struct request *rq = scsi_cmd_to_rq(SCpnt);
|
||||
struct scsi_device *sdp = SCpnt->device;
|
||||
unsigned sector_size = sdp->sector_size;
|
||||
|
||||
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
|
||||
mempool_free(rq->special_vec.bv_page, sd_page_pool);
|
||||
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
|
||||
if (sector_size > PAGE_SIZE)
|
||||
mempool_free(rq->special_vec.bv_page, sd_large_page_pool);
|
||||
else
|
||||
mempool_free(rq->special_vec.bv_page, sd_page_pool);
|
||||
}
|
||||
}
|
||||
|
||||
static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
|
||||
@@ -2912,10 +2958,7 @@ sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
|
||||
"Sector size 0 reported, assuming 512.\n");
|
||||
}
|
||||
|
||||
if (sector_size != 512 &&
|
||||
sector_size != 1024 &&
|
||||
sector_size != 2048 &&
|
||||
sector_size != 4096) {
|
||||
if (blk_validate_block_size(sector_size)) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
|
||||
sector_size);
|
||||
/*
|
||||
@@ -4018,6 +4061,7 @@ static int sd_probe(struct scsi_device *sdp)
|
||||
error = device_add(&sdkp->disk_dev);
|
||||
if (error) {
|
||||
put_device(&sdkp->disk_dev);
|
||||
put_disk(gd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -4043,6 +4087,12 @@ static int sd_probe(struct scsi_device *sdp)
|
||||
sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
|
||||
|
||||
sd_revalidate_disk(gd);
|
||||
if (sdp->sector_size > PAGE_SIZE) {
|
||||
if (sd_large_pool_create()) {
|
||||
error = -ENOMEM;
|
||||
goto out_free_index;
|
||||
}
|
||||
}
|
||||
|
||||
if (sdp->removable) {
|
||||
gd->flags |= GENHD_FL_REMOVABLE;
|
||||
@@ -4060,6 +4110,8 @@ static int sd_probe(struct scsi_device *sdp)
|
||||
if (error) {
|
||||
device_unregister(&sdkp->disk_dev);
|
||||
put_disk(gd);
|
||||
if (sdp->sector_size > PAGE_SIZE)
|
||||
sd_large_pool_destroy();
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -4212,6 +4264,9 @@ static void sd_remove(struct scsi_device *sdp)
|
||||
sd_shutdown(sdp);
|
||||
|
||||
put_disk(sdkp->disk);
|
||||
|
||||
if (sdp->sector_size > PAGE_SIZE)
|
||||
sd_large_pool_destroy();
|
||||
}
|
||||
|
||||
static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
|
||||
@@ -4435,6 +4490,8 @@ static void __exit exit_sd(void)
|
||||
|
||||
scsi_unregister_driver(&sd_template);
|
||||
mempool_destroy(sd_page_pool);
|
||||
if (sd_large_page_pool)
|
||||
mempool_destroy(sd_large_page_pool);
|
||||
|
||||
class_unregister(&sd_disk_class);
|
||||
|
||||
|
||||
@@ -81,14 +81,14 @@ static int sg_proc_init(void);
|
||||
|
||||
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
|
||||
|
||||
static int sg_big_buff = SG_DEF_RESERVED_SIZE;
|
||||
/* N.B. This variable is readable and writeable via
|
||||
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
|
||||
of this size (or less if there is not enough memory) will be reserved
|
||||
for use by this file descriptor. [Deprecated usage: this variable is also
|
||||
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
|
||||
the kernel (i.e. it is not a module).] */
|
||||
static int def_reserved_size = -1; /* picks up init parameter */
|
||||
* /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
|
||||
* of this size (or less if there is not enough memory) will be reserved
|
||||
* for use by this file descriptor.
|
||||
*/
|
||||
|
||||
/* picks up init parameter */
|
||||
static int def_reserved_size = SG_DEF_RESERVED_SIZE;
|
||||
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
|
||||
|
||||
static int scatter_elem_sz = SG_SCATTER_SZ;
|
||||
@@ -1623,10 +1623,35 @@ sg_remove_device(struct device *cl_dev)
|
||||
}
|
||||
|
||||
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
|
||||
module_param_named(def_reserved_size, def_reserved_size, int,
|
||||
S_IRUGO | S_IWUSR);
|
||||
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
|
||||
|
||||
static int def_reserved_size_set(const char *val, const struct kernel_param *kp)
|
||||
{
|
||||
int size, ret;
|
||||
|
||||
if (!val)
|
||||
return -EINVAL;
|
||||
|
||||
ret = kstrtoint(val, 0, &size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* limit to 1 MB */
|
||||
if (size < 0 || size > 1048576)
|
||||
return -ERANGE;
|
||||
|
||||
def_reserved_size = size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct kernel_param_ops def_reserved_size_ops = {
|
||||
.set = def_reserved_size_set,
|
||||
.get = param_get_int,
|
||||
};
|
||||
|
||||
module_param_cb(def_reserved_size, &def_reserved_size_ops, &def_reserved_size,
|
||||
S_IRUGO | S_IWUSR);
|
||||
|
||||
MODULE_AUTHOR("Douglas Gilbert");
|
||||
MODULE_DESCRIPTION("SCSI generic (sg) driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -1638,35 +1663,6 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
|
||||
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
|
||||
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
#include <linux/sysctl.h>
|
||||
|
||||
static const struct ctl_table sg_sysctls[] = {
|
||||
{
|
||||
.procname = "sg-big-buff",
|
||||
.data = &sg_big_buff,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
};
|
||||
|
||||
static struct ctl_table_header *hdr;
|
||||
static void register_sg_sysctls(void)
|
||||
{
|
||||
if (!hdr)
|
||||
hdr = register_sysctl("kernel", sg_sysctls);
|
||||
}
|
||||
|
||||
static void unregister_sg_sysctls(void)
|
||||
{
|
||||
unregister_sysctl_table(hdr);
|
||||
}
|
||||
#else
|
||||
#define register_sg_sysctls() do { } while (0)
|
||||
#define unregister_sg_sysctls() do { } while (0)
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
static int __init
|
||||
init_sg(void)
|
||||
{
|
||||
@@ -1676,10 +1672,6 @@ init_sg(void)
|
||||
scatter_elem_sz = PAGE_SIZE;
|
||||
scatter_elem_sz_prev = scatter_elem_sz;
|
||||
}
|
||||
if (def_reserved_size >= 0)
|
||||
sg_big_buff = def_reserved_size;
|
||||
else
|
||||
def_reserved_size = sg_big_buff;
|
||||
|
||||
rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
|
||||
SG_MAX_DEVS, "sg");
|
||||
@@ -1697,7 +1689,6 @@ init_sg(void)
|
||||
return 0;
|
||||
}
|
||||
class_unregister(&sg_sysfs_class);
|
||||
register_sg_sysctls();
|
||||
err_out:
|
||||
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
|
||||
return rc;
|
||||
@@ -1706,7 +1697,6 @@ init_sg(void)
|
||||
static void __exit
|
||||
exit_sg(void)
|
||||
{
|
||||
unregister_sg_sysctls();
|
||||
#ifdef CONFIG_SCSI_PROC_FS
|
||||
remove_proc_subtree("scsi/sg", NULL);
|
||||
#endif /* CONFIG_SCSI_PROC_FS */
|
||||
@@ -2182,10 +2172,8 @@ sg_add_sfp(Sg_device * sdp)
|
||||
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
|
||||
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
|
||||
"sg_add_sfp: sfp=0x%p\n", sfp));
|
||||
if (unlikely(sg_big_buff != def_reserved_size))
|
||||
sg_big_buff = def_reserved_size;
|
||||
|
||||
bufflen = min_t(int, sg_big_buff,
|
||||
bufflen = min_t(int, def_reserved_size,
|
||||
max_sectors_bytes(sdp->device->request_queue));
|
||||
sg_build_reserve(sfp, bufflen);
|
||||
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
|
||||
@@ -2413,7 +2401,7 @@ sg_proc_write_adio(struct file *filp, const char __user *buffer,
|
||||
|
||||
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
|
||||
return single_open(file, sg_proc_seq_show_int, &def_reserved_size);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@@ -2430,7 +2418,7 @@ sg_proc_write_dressz(struct file *filp, const char __user *buffer,
|
||||
if (err)
|
||||
return err;
|
||||
if (k <= 1048576) { /* limit "big buff" to 1 MB */
|
||||
sg_big_buff = k;
|
||||
def_reserved_size = k;
|
||||
return count;
|
||||
}
|
||||
return -ERANGE;
|
||||
@@ -2603,7 +2591,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
|
||||
|
||||
if (it && (0 == it->index))
|
||||
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
|
||||
(int)it->max, sg_big_buff);
|
||||
(int)it->max, def_reserved_size);
|
||||
|
||||
read_lock_irqsave(&sg_index_lock, iflags);
|
||||
sdp = it ? sg_lookup_dev(it->index) : NULL;
|
||||
|
||||
@@ -1131,6 +1131,26 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
|
||||
kfree(payload);
|
||||
}
|
||||
|
||||
/*
|
||||
* The current SCSI handling on the host side does not correctly handle:
|
||||
* INQUIRY with page code 0x80, MODE_SENSE / MODE_SENSE_10 with cmd[2] == 0x1c,
|
||||
* and (for FC) MAINTENANCE_IN / PERSISTENT_RESERVE_IN passthrough.
|
||||
*/
|
||||
static bool storvsc_host_mishandles_cmd(u8 opcode, struct hv_device *device)
|
||||
{
|
||||
switch (opcode) {
|
||||
case INQUIRY:
|
||||
case MODE_SENSE:
|
||||
case MODE_SENSE_10:
|
||||
return true;
|
||||
case MAINTENANCE_IN:
|
||||
case PERSISTENT_RESERVE_IN:
|
||||
return hv_dev_is_fc(device);
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void storvsc_on_io_completion(struct storvsc_device *stor_device,
|
||||
struct vstor_packet *vstor_packet,
|
||||
struct storvsc_cmd_request *request)
|
||||
@@ -1141,22 +1161,12 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
|
||||
stor_pkt = &request->vstor_packet;
|
||||
|
||||
/*
|
||||
* The current SCSI handling on the host side does
|
||||
* not correctly handle:
|
||||
* INQUIRY command with page code parameter set to 0x80
|
||||
* MODE_SENSE and MODE_SENSE_10 command with cmd[2] == 0x1c
|
||||
* MAINTENANCE_IN is not supported by HyperV FC passthrough
|
||||
*
|
||||
* Setup srb and scsi status so this won't be fatal.
|
||||
* We do this so we can distinguish truly fatal failues
|
||||
* (srb status == 0x4) and off-line the device in that case.
|
||||
*/
|
||||
|
||||
if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
|
||||
(stor_pkt->vm_srb.cdb[0] == MODE_SENSE) ||
|
||||
(stor_pkt->vm_srb.cdb[0] == MODE_SENSE_10) ||
|
||||
(stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN &&
|
||||
hv_dev_is_fc(device))) {
|
||||
if (storvsc_host_mishandles_cmd(stor_pkt->vm_srb.cdb[0], device)) {
|
||||
vstor_packet->vm_srb.scsi_status = 0;
|
||||
vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -233,7 +233,6 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
|
||||
virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
|
||||
};
|
||||
|
||||
static void virtscsi_handle_event(struct work_struct *work);
|
||||
|
||||
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
|
||||
struct virtio_scsi_event_node *event_node)
|
||||
@@ -242,7 +241,6 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
|
||||
struct scatterlist sg;
|
||||
unsigned long flags;
|
||||
|
||||
INIT_WORK(&event_node->work, virtscsi_handle_event);
|
||||
sg_init_one(&sg, event_node->event, sizeof(struct virtio_scsi_event));
|
||||
|
||||
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
|
||||
@@ -984,8 +982,10 @@ static int virtscsi_probe(struct virtio_device *vdev)
|
||||
|
||||
virtio_device_ready(vdev);
|
||||
|
||||
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
|
||||
virtscsi_kick_event_all(vscsi);
|
||||
for (int i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
|
||||
INIT_WORK(&vscsi->event_list[i].work, virtscsi_handle_event);
|
||||
|
||||
virtscsi_kick_event_all(vscsi);
|
||||
|
||||
scsi_scan_host(shost);
|
||||
return 0;
|
||||
@@ -1002,8 +1002,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
|
||||
struct Scsi_Host *shost = virtio_scsi_host(vdev);
|
||||
struct virtio_scsi *vscsi = shost_priv(shost);
|
||||
|
||||
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
|
||||
virtscsi_cancel_event_work(vscsi);
|
||||
virtscsi_cancel_event_work(vscsi);
|
||||
|
||||
scsi_remove_host(shost);
|
||||
virtscsi_remove_vqs(vdev);
|
||||
@@ -1029,8 +1028,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
|
||||
|
||||
virtio_device_ready(vdev);
|
||||
|
||||
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
|
||||
virtscsi_kick_event_all(vscsi);
|
||||
virtscsi_kick_event_all(vscsi);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1591,6 +1591,7 @@ const struct target_core_fabric_ops iscsi_ops = {
|
||||
|
||||
.write_pending_must_be_called = 1,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -1147,6 +1147,7 @@ static const struct target_core_fabric_ops loop_ops = {
|
||||
.tfc_wwn_attrs = tcm_loop_wwn_attrs,
|
||||
.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
|
||||
.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_QUEUE_SUBMIT,
|
||||
.direct_submit_supp = 0,
|
||||
};
|
||||
|
||||
@@ -2278,6 +2278,7 @@ static const struct target_core_fabric_ops sbp_ops = {
|
||||
.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
|
||||
.tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -575,6 +575,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(complete_type);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
|
||||
DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
|
||||
@@ -1266,6 +1267,24 @@ static ssize_t submit_type_store(struct config_item *item, const char *page,
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t complete_type_store(struct config_item *item, const char *page,
|
||||
size_t count)
|
||||
{
|
||||
struct se_dev_attrib *da = to_attrib(item);
|
||||
int ret;
|
||||
u8 val;
|
||||
|
||||
ret = kstrtou8(page, 0, &val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (val > TARGET_QUEUE_COMPL)
|
||||
return -EINVAL;
|
||||
|
||||
da->complete_type = val;
|
||||
return count;
|
||||
}
|
||||
|
||||
CONFIGFS_ATTR(, emulate_model_alias);
|
||||
CONFIGFS_ATTR(, emulate_dpo);
|
||||
CONFIGFS_ATTR(, emulate_fua_write);
|
||||
@@ -1302,6 +1321,7 @@ CONFIGFS_ATTR(, max_write_same_len);
|
||||
CONFIGFS_ATTR(, alua_support);
|
||||
CONFIGFS_ATTR(, pgr_support);
|
||||
CONFIGFS_ATTR(, submit_type);
|
||||
CONFIGFS_ATTR(, complete_type);
|
||||
CONFIGFS_ATTR_RO(, atomic_max_len);
|
||||
CONFIGFS_ATTR_RO(, atomic_alignment);
|
||||
CONFIGFS_ATTR_RO(, atomic_granularity);
|
||||
@@ -1350,6 +1370,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
|
||||
&attr_pgr_support,
|
||||
&attr_emulate_rsoc,
|
||||
&attr_submit_type,
|
||||
&attr_complete_type,
|
||||
&attr_atomic_alignment,
|
||||
&attr_atomic_max_len,
|
||||
&attr_atomic_granularity,
|
||||
@@ -1373,6 +1394,7 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
|
||||
&attr_alua_support,
|
||||
&attr_pgr_support,
|
||||
&attr_submit_type,
|
||||
&attr_complete_type,
|
||||
NULL,
|
||||
};
|
||||
EXPORT_SYMBOL(passthrough_attrib_attrs);
|
||||
|
||||
@@ -813,6 +813,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
|
||||
DA_UNMAP_ZEROES_DATA_DEFAULT;
|
||||
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
|
||||
dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
|
||||
dev->dev_attrib.complete_type = TARGET_FABRIC_DEFAULT_COMPL;
|
||||
|
||||
/* Skip allocating lun_stats since we can't export them. */
|
||||
xcopy_lun = &dev->xcopy_lun;
|
||||
|
||||
@@ -1065,6 +1065,28 @@ target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
|
||||
}
|
||||
CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
|
||||
|
||||
static ssize_t
|
||||
target_fabric_wwn_default_complete_type_show(struct config_item *item,
|
||||
char *page)
|
||||
{
|
||||
struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
|
||||
param_group);
|
||||
return sysfs_emit(page, "%u\n",
|
||||
wwn->wwn_tf->tf_ops->default_compl_type);
|
||||
}
|
||||
CONFIGFS_ATTR_RO(target_fabric_wwn_, default_complete_type);
|
||||
|
||||
static ssize_t
|
||||
target_fabric_wwn_direct_complete_supported_show(struct config_item *item,
|
||||
char *page)
|
||||
{
|
||||
struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
|
||||
param_group);
|
||||
return sysfs_emit(page, "%u\n",
|
||||
wwn->wwn_tf->tf_ops->direct_compl_supp);
|
||||
}
|
||||
CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_complete_supported);
|
||||
|
||||
static ssize_t
|
||||
target_fabric_wwn_default_submit_type_show(struct config_item *item,
|
||||
char *page)
|
||||
@@ -1089,6 +1111,8 @@ CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_submit_supported);
|
||||
|
||||
static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
|
||||
&target_fabric_wwn_attr_cmd_completion_affinity,
|
||||
&target_fabric_wwn_attr_default_complete_type,
|
||||
&target_fabric_wwn_attr_direct_complete_supported,
|
||||
&target_fabric_wwn_attr_default_submit_type,
|
||||
&target_fabric_wwn_attr_direct_submit_supported,
|
||||
NULL,
|
||||
|
||||
@@ -2809,7 +2809,7 @@ static void core_scsi3_release_preempt_and_abort(
|
||||
}
|
||||
|
||||
static sense_reason_t
|
||||
core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
||||
core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
||||
u64 sa_res_key, enum preempt_type preempt_type)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
@@ -2838,11 +2838,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
||||
core_scsi3_put_pr_reg(pr_reg_n);
|
||||
return TCM_RESERVATION_CONFLICT;
|
||||
}
|
||||
if (scope != PR_SCOPE_LU_SCOPE) {
|
||||
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
|
||||
core_scsi3_put_pr_reg(pr_reg_n);
|
||||
return TCM_INVALID_PARAMETER_LIST;
|
||||
}
|
||||
|
||||
spin_lock(&dev->dev_reservation_lock);
|
||||
pr_res_holder = dev->dev_pr_res_holder;
|
||||
@@ -2856,6 +2851,37 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
||||
core_scsi3_put_pr_reg(pr_reg_n);
|
||||
return TCM_INVALID_PARAMETER_LIST;
|
||||
}
|
||||
|
||||
/* Validate TYPE and SCOPE fields if they will be used */
|
||||
if (pr_res_holder &&
|
||||
(pr_res_holder->pr_res_key == sa_res_key ||
|
||||
(all_reg && !sa_res_key))) {
|
||||
switch (type) {
|
||||
case PR_TYPE_WRITE_EXCLUSIVE:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS:
|
||||
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
|
||||
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
|
||||
break;
|
||||
default:
|
||||
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
|
||||
" Type: 0x%02x\n",
|
||||
(preempt_type == PREEMPT_AND_ABORT) ?
|
||||
"_AND_ABORT" : "", type);
|
||||
spin_unlock(&dev->dev_reservation_lock);
|
||||
core_scsi3_put_pr_reg(pr_reg_n);
|
||||
return TCM_INVALID_CDB_FIELD;
|
||||
}
|
||||
|
||||
if (scope != PR_SCOPE_LU_SCOPE) {
|
||||
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
|
||||
spin_unlock(&dev->dev_reservation_lock);
|
||||
core_scsi3_put_pr_reg(pr_reg_n);
|
||||
return TCM_INVALID_PARAMETER_LIST;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
|
||||
*
|
||||
@@ -3118,27 +3144,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static sense_reason_t
|
||||
core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
|
||||
u64 res_key, u64 sa_res_key, enum preempt_type preempt_type)
|
||||
{
|
||||
switch (type) {
|
||||
case PR_TYPE_WRITE_EXCLUSIVE:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS:
|
||||
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
|
||||
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
|
||||
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
|
||||
return core_scsi3_pro_preempt(cmd, type, scope, res_key,
|
||||
sa_res_key, preempt_type);
|
||||
default:
|
||||
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
|
||||
" Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", type);
|
||||
return TCM_INVALID_CDB_FIELD;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static sense_reason_t
|
||||
core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
|
||||
u64 sa_res_key, int aptpl, int unreg)
|
||||
|
||||
@@ -1187,7 +1187,8 @@ sbc_execute_unmap(struct se_cmd *cmd)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (lba + range > dev->transport->get_blocks(dev) + 1) {
|
||||
if (lba + range < lba ||
|
||||
lba + range > dev->transport->get_blocks(dev) + 1) {
|
||||
ret = TCM_ADDRESS_OUT_OF_RANGE;
|
||||
goto err;
|
||||
}
|
||||
|
||||
@@ -902,13 +902,59 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void target_complete(struct se_cmd *cmd, int success)
|
||||
{
|
||||
struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
|
||||
struct se_dev_attrib *da;
|
||||
u8 compl_type;
|
||||
int cpu;
|
||||
|
||||
if (!wwn) {
|
||||
cpu = cmd->cpuid;
|
||||
goto queue_work;
|
||||
}
|
||||
|
||||
da = &cmd->se_dev->dev_attrib;
|
||||
if (da->complete_type == TARGET_FABRIC_DEFAULT_COMPL)
|
||||
compl_type = wwn->wwn_tf->tf_ops->default_compl_type;
|
||||
else if (da->complete_type == TARGET_DIRECT_COMPL &&
|
||||
wwn->wwn_tf->tf_ops->direct_compl_supp)
|
||||
compl_type = TARGET_DIRECT_COMPL;
|
||||
else
|
||||
compl_type = TARGET_QUEUE_COMPL;
|
||||
|
||||
if (compl_type == TARGET_DIRECT_COMPL) {
|
||||
/*
|
||||
* Failure handling and processing secondary stages of
|
||||
* complex commands can be too heavy to handle from the
|
||||
* fabric driver so always defer.
|
||||
*/
|
||||
if (success && !cmd->transport_complete_callback) {
|
||||
target_complete_ok_work(&cmd->work);
|
||||
return;
|
||||
}
|
||||
|
||||
compl_type = TARGET_QUEUE_COMPL;
|
||||
}
|
||||
|
||||
queue_work:
|
||||
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
|
||||
target_complete_failure_work);
|
||||
|
||||
if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
|
||||
cpu = cmd->cpuid;
|
||||
else
|
||||
cpu = wwn->cmd_compl_affinity;
|
||||
|
||||
queue_work_on(cpu, target_completion_wq, &cmd->work);
|
||||
}
|
||||
|
||||
/* May be called from interrupt context so must not sleep. */
|
||||
void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
|
||||
sense_reason_t sense_reason)
|
||||
{
|
||||
struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
|
||||
int success, cpu;
|
||||
unsigned long flags;
|
||||
int success;
|
||||
|
||||
if (target_cmd_interrupted(cmd))
|
||||
return;
|
||||
@@ -933,15 +979,7 @@ void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
|
||||
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
|
||||
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
||||
|
||||
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
|
||||
target_complete_failure_work);
|
||||
|
||||
if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
|
||||
cpu = cmd->cpuid;
|
||||
else
|
||||
cpu = wwn->cmd_compl_affinity;
|
||||
|
||||
queue_work_on(cpu, target_completion_wq, &cmd->work);
|
||||
target_complete(cmd, success);
|
||||
}
|
||||
EXPORT_SYMBOL(target_complete_cmd_with_sense);
|
||||
|
||||
@@ -1112,7 +1150,7 @@ void transport_dump_vpd_proto_id(
|
||||
}
|
||||
|
||||
if (p_buf)
|
||||
strncpy(p_buf, buf, p_buf_len);
|
||||
strscpy(p_buf, buf, p_buf_len);
|
||||
else
|
||||
pr_debug("%s", buf);
|
||||
}
|
||||
@@ -1162,7 +1200,7 @@ int transport_dump_vpd_assoc(
|
||||
}
|
||||
|
||||
if (p_buf)
|
||||
strncpy(p_buf, buf, p_buf_len);
|
||||
strscpy(p_buf, buf, p_buf_len);
|
||||
else
|
||||
pr_debug("%s", buf);
|
||||
|
||||
@@ -1222,7 +1260,7 @@ int transport_dump_vpd_ident_type(
|
||||
if (p_buf) {
|
||||
if (p_buf_len < strlen(buf)+1)
|
||||
return -EINVAL;
|
||||
strncpy(p_buf, buf, p_buf_len);
|
||||
strscpy(p_buf, buf, p_buf_len);
|
||||
} else {
|
||||
pr_debug("%s", buf);
|
||||
}
|
||||
@@ -1276,7 +1314,7 @@ int transport_dump_vpd_ident(
|
||||
}
|
||||
|
||||
if (p_buf)
|
||||
strncpy(p_buf, buf, p_buf_len);
|
||||
strscpy(p_buf, buf, p_buf_len);
|
||||
else
|
||||
pr_debug("%s", buf);
|
||||
|
||||
|
||||
@@ -434,6 +434,7 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
|
||||
.tfc_wwn_attrs = ft_wwn_attrs,
|
||||
.tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
|
||||
ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
|
||||
ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufs-txeq.o
|
||||
ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o
|
||||
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
|
||||
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
|
||||
|
||||
@@ -209,6 +209,265 @@ static const struct ufs_debugfs_attr ufs_attrs[] = {
|
||||
{ }
|
||||
};
|
||||
|
||||
static int ufs_tx_eq_params_show(struct seq_file *s, void *data)
|
||||
{
|
||||
const char *file_name = s->file->f_path.dentry->d_name.name;
|
||||
u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
|
||||
struct ufs_hba *hba = hba_from_file(s->file);
|
||||
struct ufshcd_tx_eq_settings *settings;
|
||||
struct ufs_pa_layer_attr *pwr_info;
|
||||
struct ufshcd_tx_eq_params *params;
|
||||
u32 rate = hba->pwr_info.hs_rate;
|
||||
u32 num_lanes;
|
||||
int lane;
|
||||
|
||||
if (!ufshcd_is_tx_eq_supported(hba))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
|
||||
seq_printf(s, "Invalid gear selected: %u\n", gear);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!hba->max_pwr_info.is_valid) {
|
||||
seq_puts(s, "Max power info is invalid\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
pwr_info = &hba->max_pwr_info.info;
|
||||
params = &hba->tx_eq_params[gear - 1];
|
||||
if (!params->is_valid) {
|
||||
seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
|
||||
gear, ufs_hs_rate_to_str(rate));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (strcmp(file_name, "host_tx_eq_params") == 0) {
|
||||
settings = params->host;
|
||||
num_lanes = pwr_info->lane_tx;
|
||||
seq_printf(s, "Host TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
|
||||
hba->host_preshoot_cap, hba->host_deemphasis_cap);
|
||||
} else if (strcmp(file_name, "device_tx_eq_params") == 0) {
|
||||
settings = params->device;
|
||||
num_lanes = pwr_info->lane_rx;
|
||||
seq_printf(s, "Device TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
|
||||
hba->device_preshoot_cap, hba->device_deemphasis_cap);
|
||||
} else {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
seq_printf(s, "TX EQ setting for HS-G%u, Rate-%s:\n", gear,
|
||||
ufs_hs_rate_to_str(rate));
|
||||
for (lane = 0; lane < num_lanes; lane++)
|
||||
seq_printf(s, "TX Lane %d - PreShoot: %d, DeEmphasis: %d, Pre-Coding %senabled\n",
|
||||
lane, settings[lane].preshoot,
|
||||
settings[lane].deemphasis,
|
||||
settings[lane].precode_en ? "" : "not ");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufs_tx_eq_params_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ufs_tx_eq_params_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations ufs_tx_eq_params_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = ufs_tx_eq_params_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static const struct ufs_debugfs_attr ufs_tx_eq_attrs[] = {
|
||||
{ "host_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
|
||||
{ "device_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
|
||||
{ }
|
||||
};
|
||||
|
||||
static int ufs_tx_eqtr_record_show(struct seq_file *s, void *data)
|
||||
{
|
||||
const char *file_name = s->file->f_path.dentry->d_name.name;
|
||||
u8 (*fom_array)[TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
|
||||
u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
|
||||
unsigned long preshoot_bitmap, deemphasis_bitmap;
|
||||
struct ufs_hba *hba = hba_from_file(s->file);
|
||||
struct ufs_pa_layer_attr *pwr_info;
|
||||
struct ufshcd_tx_eq_params *params;
|
||||
struct ufshcd_tx_eqtr_record *rec;
|
||||
u32 rate = hba->pwr_info.hs_rate;
|
||||
u8 preshoot, deemphasis;
|
||||
u32 num_lanes;
|
||||
char name[32];
|
||||
int lane;
|
||||
|
||||
if (!ufshcd_is_tx_eq_supported(hba))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
|
||||
seq_printf(s, "Invalid gear selected: %u\n", gear);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!hba->max_pwr_info.is_valid) {
|
||||
seq_puts(s, "Max power info is invalid\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
pwr_info = &hba->max_pwr_info.info;
|
||||
params = &hba->tx_eq_params[gear - 1];
|
||||
if (!params->is_valid) {
|
||||
seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
|
||||
gear, ufs_hs_rate_to_str(rate));
|
||||
return 0;
|
||||
}
|
||||
|
||||
rec = params->eqtr_record;
|
||||
if (!rec || !rec->last_record_index) {
|
||||
seq_printf(s, "No TX EQTR records found for HS-G%u, Rate-%s.\n",
|
||||
gear, ufs_hs_rate_to_str(rate));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (strcmp(file_name, "host_tx_eqtr_record") == 0) {
|
||||
preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1;
|
||||
deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1;
|
||||
num_lanes = pwr_info->lane_tx;
|
||||
fom_array = rec->host_fom;
|
||||
snprintf(name, sizeof(name), "%s", "Host");
|
||||
} else if (strcmp(file_name, "device_tx_eqtr_record") == 0) {
|
||||
preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1;
|
||||
deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1;
|
||||
num_lanes = pwr_info->lane_rx;
|
||||
fom_array = rec->device_fom;
|
||||
snprintf(name, sizeof(name), "%s", "Device");
|
||||
} else {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
seq_printf(s, "%s TX EQTR record summary -\n", name);
|
||||
seq_printf(s, "Target Power Mode: HS-G%u, Rate-%s\n", gear,
|
||||
ufs_hs_rate_to_str(rate));
|
||||
seq_printf(s, "Most recent record index: %d\n",
|
||||
rec->last_record_index);
|
||||
seq_printf(s, "Most recent record timestamp: %llu us\n",
|
||||
ktime_to_us(rec->last_record_ts));
|
||||
|
||||
for (lane = 0; lane < num_lanes; lane++) {
|
||||
seq_printf(s, "\nTX Lane %d FOM - %s\n", lane, "PreShoot\\DeEmphasis");
|
||||
seq_puts(s, "\\");
|
||||
/* Print DeEmphasis header as X-axis. */
|
||||
for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++)
|
||||
seq_printf(s, "%8d%s", deemphasis, " ");
|
||||
seq_puts(s, "\n");
|
||||
/* Print matrix rows with PreShoot as Y-axis. */
|
||||
for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) {
|
||||
seq_printf(s, "%d", preshoot);
|
||||
for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) {
|
||||
if (test_bit(preshoot, &preshoot_bitmap) &&
|
||||
test_bit(deemphasis, &deemphasis_bitmap)) {
|
||||
u8 fom = fom_array[lane][preshoot][deemphasis];
|
||||
u8 fom_val = fom & RX_FOM_VALUE_MASK;
|
||||
bool precode_en = fom & RX_FOM_PRECODING_EN_BIT;
|
||||
|
||||
if (ufshcd_is_txeq_presets_used(hba) &&
|
||||
!ufshcd_is_txeq_preset_selected(preshoot, deemphasis))
|
||||
seq_printf(s, "%8s%s", "-", " ");
|
||||
else
|
||||
seq_printf(s, "%8u%s", fom_val,
|
||||
precode_en ? "*" : " ");
|
||||
} else {
|
||||
seq_printf(s, "%8s%s", "x", " ");
|
||||
}
|
||||
}
|
||||
seq_puts(s, "\n");
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufs_tx_eqtr_record_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ufs_tx_eqtr_record_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations ufs_tx_eqtr_record_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = ufs_tx_eqtr_record_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static ssize_t ufs_tx_eq_ctrl_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
u32 gear = (u32)(uintptr_t)file->f_inode->i_private;
|
||||
struct ufs_hba *hba = hba_from_file(file);
|
||||
char kbuf[32];
|
||||
int ret;
|
||||
|
||||
if (count >= sizeof(kbuf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(kbuf, buf, count))
|
||||
return -EFAULT;
|
||||
|
||||
if (!ufshcd_is_tx_eq_supported(hba))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
|
||||
!hba->max_pwr_info.is_valid)
|
||||
return -EBUSY;
|
||||
|
||||
if (!hba->ufs_device_wlun)
|
||||
return -ENODEV;
|
||||
|
||||
kbuf[count] = '\0';
|
||||
|
||||
if (sysfs_streq(kbuf, "retrain")) {
|
||||
ret = ufs_debugfs_get_user_access(hba);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = ufshcd_retrain_tx_eq(hba, gear);
|
||||
ufs_debugfs_put_user_access(hba);
|
||||
} else {
|
||||
/* Unknown operation */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return ret ? ret : count;
|
||||
}
|
||||
|
||||
static int ufs_tx_eq_ctrl_show(struct seq_file *s, void *data)
|
||||
{
|
||||
seq_puts(s, "write 'retrain' to retrain TX Equalization settings\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufs_tx_eq_ctrl_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ufs_tx_eq_ctrl_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations ufs_tx_eq_ctrl_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = ufs_tx_eq_ctrl_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.write = ufs_tx_eq_ctrl_write,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = {
|
||||
{ "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
|
||||
{ "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
|
||||
{ "tx_eq_ctrl", 0600, &ufs_tx_eq_ctrl_fops },
|
||||
{ }
|
||||
};
|
||||
|
||||
void ufs_debugfs_hba_init(struct ufs_hba *hba)
|
||||
{
|
||||
const struct ufs_debugfs_attr *attr;
|
||||
@@ -230,6 +489,37 @@ void ufs_debugfs_hba_init(struct ufs_hba *hba)
|
||||
hba, &ee_usr_mask_fops);
|
||||
debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
|
||||
&hba->debugfs_ee_rate_limit_ms);
|
||||
|
||||
if (!(hba->caps & UFSHCD_CAP_TX_EQUALIZATION))
|
||||
return;
|
||||
|
||||
for (u32 gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) {
|
||||
struct dentry *txeq_dir;
|
||||
char name[32];
|
||||
|
||||
snprintf(name, sizeof(name), "tx_eq_hs_gear%d", gear);
|
||||
txeq_dir = debugfs_create_dir(name, hba->debugfs_root);
|
||||
if (IS_ERR_OR_NULL(txeq_dir))
|
||||
return;
|
||||
|
||||
d_inode(txeq_dir)->i_private = hba;
|
||||
|
||||
/* Create files for TX Equalization parameters */
|
||||
for (attr = ufs_tx_eq_attrs; attr->name; attr++)
|
||||
debugfs_create_file(attr->name, attr->mode, txeq_dir,
|
||||
(void *)(uintptr_t)gear,
|
||||
attr->fops);
|
||||
|
||||
/* TX EQTR is supported for HS-G4 and higher Gears */
|
||||
if (gear < UFS_HS_G4)
|
||||
continue;
|
||||
|
||||
/* Create files for TX EQTR related attributes */
|
||||
for (attr = ufs_tx_eqtr_attrs; attr->name; attr++)
|
||||
debugfs_create_file(attr->name, attr->mode, txeq_dir,
|
||||
(void *)(uintptr_t)gear,
|
||||
attr->fops);
|
||||
}
|
||||
}
|
||||
|
||||
void ufs_debugfs_hba_exit(struct ufs_hba *hba)
|
||||
|
||||
@@ -5,6 +5,9 @@
|
||||
#ifndef __UFS_DEBUGFS_H__
|
||||
#define __UFS_DEBUGFS_H__
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct ufs_hba;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct ufs_hba;
|
||||
|
||||
#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
|
||||
void ufs_fault_inject_hba_init(struct ufs_hba *hba);
|
||||
bool ufs_trigger_eh(struct ufs_hba *hba);
|
||||
|
||||
@@ -31,7 +31,8 @@
|
||||
|
||||
#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
|
||||
UFSHCD_ERROR_MASK |\
|
||||
MCQ_CQ_EVENT_STATUS)
|
||||
MCQ_CQ_EVENT_STATUS |\
|
||||
MCQ_IAG_EVENT_STATUS)
|
||||
|
||||
/* Max mcq register polling time in microseconds */
|
||||
#define MCQ_POLL_US 500000
|
||||
@@ -272,14 +273,29 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
|
||||
|
||||
u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i)
|
||||
{
|
||||
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR);
|
||||
}
|
||||
|
||||
void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i)
|
||||
{
|
||||
writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR);
|
||||
}
|
||||
|
||||
/*
|
||||
* Current MCQ specification doesn't provide a Task Tag or its equivalent in
|
||||
* UFSHCI 4.0 MCQ specification doesn't provide a Task Tag or its equivalent in
|
||||
* the Completion Queue Entry. Find the Task Tag using an indirect method.
|
||||
* UFSHCI 4.1 and above can directly return the Task Tag in the Completion Queue
|
||||
* Entry.
|
||||
*/
|
||||
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
|
||||
{
|
||||
u64 addr;
|
||||
|
||||
if (hba->ufs_version >= ufshci_version(4, 1))
|
||||
return cqe->task_tag;
|
||||
|
||||
/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
|
||||
BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
|
||||
|
||||
@@ -301,9 +317,19 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
|
||||
ufshcd_compl_one_cqe(hba, tag, cqe);
|
||||
/* After processed the cqe, mark it empty (invalid) entry */
|
||||
cqe->command_desc_base_addr = 0;
|
||||
} else {
|
||||
dev_err(hba->dev, "Abnormal CQ entry!\n");
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called from the UFS error handler with the UFS host
|
||||
* controller disabled (HCE = 0). Reading host controller registers, e.g. the
|
||||
* CQ tail pointer (CQTPy), may not be safe with the host controller disabled.
|
||||
* Hence, iterate over all completion queue entries. This won't result in
|
||||
* double completions because ufshcd_mcq_process_cqe() clears a CQE after it
|
||||
* has been processed.
|
||||
*/
|
||||
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
|
||||
struct ufs_hw_queue *hwq)
|
||||
{
|
||||
|
||||
@@ -605,6 +605,34 @@ static ssize_t device_lvl_exception_id_show(struct device *dev,
|
||||
return sysfs_emit(buf, "%llu\n", exception_id);
|
||||
}
|
||||
|
||||
static ssize_t dme_qos_notification_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct ufs_hba *hba = dev_get_drvdata(dev);
|
||||
|
||||
return sysfs_emit(buf, "0x%x\n", atomic_read(&hba->dme_qos_notification));
|
||||
}
|
||||
|
||||
static ssize_t dme_qos_notification_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct ufs_hba *hba = dev_get_drvdata(dev);
|
||||
unsigned int value;
|
||||
|
||||
if (kstrtouint(buf, 0, &value))
|
||||
return -EINVAL;
|
||||
|
||||
/* the only supported usecase is to reset the dme_qos_notification */
|
||||
if (value)
|
||||
return -EINVAL;
|
||||
|
||||
atomic_set(&hba->dme_qos_notification, 0);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR_RW(rpm_lvl);
|
||||
static DEVICE_ATTR_RO(rpm_target_dev_state);
|
||||
static DEVICE_ATTR_RO(rpm_target_link_state);
|
||||
@@ -621,6 +649,7 @@ static DEVICE_ATTR_RW(pm_qos_enable);
|
||||
static DEVICE_ATTR_RO(critical_health);
|
||||
static DEVICE_ATTR_RW(device_lvl_exception_count);
|
||||
static DEVICE_ATTR_RO(device_lvl_exception_id);
|
||||
static DEVICE_ATTR_RW(dme_qos_notification);
|
||||
|
||||
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
|
||||
&dev_attr_rpm_lvl.attr,
|
||||
@@ -639,6 +668,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
|
||||
&dev_attr_critical_health.attr,
|
||||
&dev_attr_device_lvl_exception_count.attr,
|
||||
&dev_attr_device_lvl_exception_id.attr,
|
||||
&dev_attr_dme_qos_notification.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
||||
1293
drivers/ufs/core/ufs-txeq.c
Normal file
1293
drivers/ufs/core/ufs-txeq.c
Normal file
File diff suppressed because it is too large
Load Diff
@@ -8,7 +8,6 @@
|
||||
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <ufs/ufshcd.h>
|
||||
#include "ufshcd-priv.h"
|
||||
#include <ufs/ufshci.h>
|
||||
|
||||
#ifdef CONFIG_SCSI_UFS_CRYPTO
|
||||
|
||||
@@ -76,8 +76,13 @@ void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
|
||||
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
|
||||
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
|
||||
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
|
||||
u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i);
|
||||
void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i);
|
||||
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
|
||||
void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
|
||||
int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us);
|
||||
void ufshcd_resume_command_processing(struct ufs_hba *hba);
|
||||
int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up);
|
||||
|
||||
/**
|
||||
* enum ufs_descr_fmt - UFS string descriptor format
|
||||
@@ -103,6 +108,16 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
|
||||
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
|
||||
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
|
||||
|
||||
int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear);
|
||||
void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba);
|
||||
int ufshcd_config_tx_eq_settings(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
bool force_tx_eqtr);
|
||||
void ufshcd_print_tx_eq_params(struct ufs_hba *hba);
|
||||
bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba);
|
||||
bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis);
|
||||
int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear);
|
||||
|
||||
/* Wrapper functions for safely calling variant operations */
|
||||
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
|
||||
{
|
||||
@@ -167,14 +182,24 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ufshcd_vops_negotiate_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
if (hba->vops && hba->vops->negotiate_pwr_mode)
|
||||
return hba->vops->negotiate_pwr_mode(hba, dev_max_params,
|
||||
dev_req_params);
|
||||
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
if (hba->vops && hba->vops->pwr_change_notify)
|
||||
return hba->vops->pwr_change_notify(hba, status,
|
||||
dev_max_params, dev_req_params);
|
||||
dev_req_params);
|
||||
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
@@ -287,6 +312,38 @@ static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned l
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ufshcd_vops_get_rx_fom(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter)
|
||||
{
|
||||
if (hba->vops && hba->vops->get_rx_fom)
|
||||
return hba->vops->get_rx_fom(hba, pwr_mode, h_iter, d_iter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ufshcd_vops_apply_tx_eqtr_settings(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter)
|
||||
{
|
||||
if (hba->vops && hba->vops->apply_tx_eqtr_settings)
|
||||
return hba->vops->apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ufshcd_vops_tx_eqtr_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
struct ufs_pa_layer_attr *pwr_mode)
|
||||
{
|
||||
if (hba->vops && hba->vops->tx_eqtr_notify)
|
||||
return hba->vops->tx_eqtr_notify(hba, status, pwr_mode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
|
||||
|
||||
/**
|
||||
|
||||
@@ -315,6 +315,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
|
||||
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
|
||||
.model = "THGLF2G9D8KBADG",
|
||||
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
|
||||
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
|
||||
.model = "THGJFJT0E25BAIP",
|
||||
.quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
|
||||
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
|
||||
.model = "THGJFJT1E45BATP",
|
||||
.quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
|
||||
@@ -334,11 +337,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
|
||||
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
|
||||
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
|
||||
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
|
||||
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
|
||||
bool scale_up);
|
||||
static irqreturn_t ufshcd_intr(int irq, void *__hba);
|
||||
static int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode);
|
||||
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
|
||||
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
|
||||
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
|
||||
@@ -1212,8 +1211,7 @@ static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
|
||||
*
|
||||
* Return: 0 if successful; < 0 upon failure.
|
||||
*/
|
||||
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
|
||||
bool scale_up)
|
||||
int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up)
|
||||
{
|
||||
int ret = 0;
|
||||
ktime_t start = ktime_get();
|
||||
@@ -1366,6 +1364,48 @@ static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_pause_command_processing - Pause command processing
|
||||
* @hba: per-adapter instance
|
||||
* @timeout_us: timeout in microseconds to wait for pending commands to finish
|
||||
*
|
||||
* This function stops new command submissions and waits for existing commands
|
||||
* to complete.
|
||||
*
|
||||
* Return: 0 on success, %-EBUSY if commands did not finish within @timeout_us.
|
||||
* On failure, all acquired locks are released and the tagset is unquiesced.
|
||||
*/
|
||||
int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hba->host->scan_mutex);
|
||||
blk_mq_quiesce_tagset(&hba->host->tag_set);
|
||||
down_write(&hba->clk_scaling_lock);
|
||||
|
||||
if (ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
|
||||
ret = -EBUSY;
|
||||
up_write(&hba->clk_scaling_lock);
|
||||
blk_mq_unquiesce_tagset(&hba->host->tag_set);
|
||||
mutex_unlock(&hba->host->scan_mutex);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_resume_command_processing - Resume command processing
|
||||
* @hba: per-adapter instance
|
||||
*
|
||||
* This function resumes command submissions.
|
||||
*/
|
||||
void ufshcd_resume_command_processing(struct ufs_hba *hba)
|
||||
{
|
||||
up_write(&hba->clk_scaling_lock);
|
||||
blk_mq_unquiesce_tagset(&hba->host->tag_set);
|
||||
mutex_unlock(&hba->host->scan_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_scale_gear - scale up/down UFS gear
|
||||
* @hba: per adapter instance
|
||||
@@ -1411,7 +1451,8 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up
|
||||
|
||||
config_pwr_mode:
|
||||
/* check if the power mode needs to be changed or not? */
|
||||
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
|
||||
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info,
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE);
|
||||
if (ret)
|
||||
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
|
||||
__func__, ret,
|
||||
@@ -4252,7 +4293,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
|
||||
pwr_mode_change = true;
|
||||
}
|
||||
if (pwr_mode_change) {
|
||||
ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
|
||||
ret = ufshcd_change_power_mode(hba, &temp_pwr_info,
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
@@ -4276,7 +4318,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
|
||||
|
||||
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
|
||||
&& pwr_mode_change)
|
||||
ufshcd_change_power_mode(hba, &orig_pwr_info);
|
||||
ufshcd_change_power_mode(hba, &orig_pwr_info,
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@@ -4343,16 +4386,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
|
||||
ret = __ufshcd_send_uic_cmd(hba, cmd);
|
||||
if (ret) {
|
||||
dev_err(hba->dev,
|
||||
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
|
||||
cmd->command, cmd->argument3, ret);
|
||||
"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) uic error %d\n",
|
||||
cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
|
||||
cmd->argument3, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!wait_for_completion_timeout(hba->uic_async_done,
|
||||
msecs_to_jiffies(uic_cmd_timeout))) {
|
||||
dev_err(hba->dev,
|
||||
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
|
||||
cmd->command, cmd->argument3);
|
||||
"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) completion timeout\n",
|
||||
cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
|
||||
cmd->argument3);
|
||||
|
||||
if (!cmd->cmd_active) {
|
||||
dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
|
||||
@@ -4368,14 +4413,16 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
|
||||
status = ufshcd_get_upmcrs(hba);
|
||||
if (status != PWR_LOCAL) {
|
||||
dev_err(hba->dev,
|
||||
"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
|
||||
cmd->command, status);
|
||||
"pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) failed, host upmcrs:0x%x\n",
|
||||
cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
|
||||
cmd->argument3, status);
|
||||
ret = (status != PWR_OK) ? status : -1;
|
||||
}
|
||||
out:
|
||||
if (ret) {
|
||||
ufshcd_print_host_state(hba);
|
||||
ufshcd_print_pwr_info(hba);
|
||||
ufshcd_print_tx_eq_params(hba);
|
||||
ufshcd_print_evt_hist(hba);
|
||||
}
|
||||
|
||||
@@ -4393,6 +4440,29 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_uic_tx_eqtr - Perform UIC TX Equalization Training
|
||||
* @hba: per adapter instance
|
||||
* @gear: target gear for EQTR
|
||||
*
|
||||
* Returns 0 on success, negative error code otherwise
|
||||
*/
|
||||
int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear)
|
||||
{
|
||||
struct uic_command uic_cmd = {
|
||||
.command = UIC_CMD_DME_SET,
|
||||
.argument1 = UIC_ARG_MIB(PA_EQTR_GEAR),
|
||||
.argument3 = gear,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ufshcd_hold(hba);
|
||||
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
|
||||
ufshcd_release(hba);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
|
||||
* @hba: per adapter instance
|
||||
@@ -4656,13 +4726,33 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode)
|
||||
/**
|
||||
* ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence
|
||||
* @hba: per-adapter instance
|
||||
* @pwr_mode: pointer to the target power mode (gear/lane) attributes
|
||||
* @pmc_policy: Power Mode change policy
|
||||
*
|
||||
* This function handles the low-level DME (Device Management Entity)
|
||||
* configuration required to transition the UFS link to a new power mode. It
|
||||
* performs the following steps:
|
||||
* 1. Checks if the requested mode matches the current state.
|
||||
* 2. Sets M-PHY and UniPro attributes including Gear (PA_RXGEAR/TXGEAR),
|
||||
* Lanes, Termination, and HS Series (PA_HSSERIES).
|
||||
* 3. Configures default UniPro timeout values (DL_FC0, etc.) unless
|
||||
* explicitly skipped via quirks.
|
||||
* 4. Triggers the actual hardware mode change via ufshcd_uic_change_pwr_mode().
|
||||
* 5. Updates the HBA's cached power information on success.
|
||||
*
|
||||
* Return: 0 on success, non-zero error code on failure.
|
||||
*/
|
||||
static int ufshcd_dme_change_power_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
enum ufshcd_pmc_policy pmc_policy)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* if already configured to the requested pwr_mode */
|
||||
if (!hba->force_pmc &&
|
||||
if (pmc_policy == UFSHCD_PMC_POLICY_DONT_FORCE &&
|
||||
pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
|
||||
pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
|
||||
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
|
||||
@@ -4741,32 +4831,68 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_change_power_mode() - Change UFS Link Power Mode
|
||||
* @hba: per-adapter instance
|
||||
* @pwr_mode: pointer to the target power mode (gear/lane) attributes
|
||||
* @pmc_policy: Power Mode change policy
|
||||
*
|
||||
* This function handles the high-level sequence for changing the UFS link
|
||||
* power mode. It triggers vendor-specific pre-change notification,
|
||||
* executes the DME (Device Management Entity) power mode change sequence,
|
||||
* and, upon success, triggers vendor-specific post-change notification.
|
||||
*
|
||||
* Return: 0 on success, non-zero error code on failure.
|
||||
*/
|
||||
int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
enum ufshcd_pmc_policy pmc_policy)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode);
|
||||
|
||||
ret = ufshcd_dme_change_power_mode(hba, pwr_mode, pmc_policy);
|
||||
|
||||
if (!ret)
|
||||
ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ufshcd_change_power_mode);
|
||||
|
||||
/**
|
||||
* ufshcd_config_pwr_mode - configure a new power mode
|
||||
* @hba: per-adapter instance
|
||||
* @desired_pwr_mode: desired power configuration
|
||||
* @pmc_policy: Power Mode change policy
|
||||
*
|
||||
* Return: 0 upon success; < 0 upon failure.
|
||||
*/
|
||||
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *desired_pwr_mode)
|
||||
struct ufs_pa_layer_attr *desired_pwr_mode,
|
||||
enum ufshcd_pmc_policy pmc_policy)
|
||||
{
|
||||
struct ufs_pa_layer_attr final_params = { 0 };
|
||||
int ret;
|
||||
|
||||
ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
|
||||
desired_pwr_mode, &final_params);
|
||||
ret = ufshcd_vops_negotiate_pwr_mode(hba, desired_pwr_mode,
|
||||
&final_params);
|
||||
if (ret) {
|
||||
if (ret != -ENOTSUPP)
|
||||
dev_err(hba->dev, "Failed to negotiate power mode: %d, use desired as is\n",
|
||||
ret);
|
||||
|
||||
if (ret)
|
||||
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
|
||||
}
|
||||
|
||||
ret = ufshcd_change_power_mode(hba, &final_params);
|
||||
ret = ufshcd_config_tx_eq_settings(hba, &final_params, false);
|
||||
if (ret)
|
||||
dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n",
|
||||
final_params.gear_tx,
|
||||
ufs_hs_rate_to_str(final_params.hs_rate), ret);
|
||||
|
||||
if (!ret)
|
||||
ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
|
||||
&final_params);
|
||||
|
||||
return ret;
|
||||
return ufshcd_change_power_mode(hba, &final_params, pmc_policy);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
|
||||
|
||||
@@ -5580,8 +5706,11 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
|
||||
|
||||
guard(spinlock_irqsave)(hba->host->host_lock);
|
||||
cmd = hba->active_uic_cmd;
|
||||
if (!cmd)
|
||||
if (!cmd) {
|
||||
dev_err(hba->dev,
|
||||
"No active UIC command. Maybe a timeout occurred?\n");
|
||||
return retval;
|
||||
}
|
||||
|
||||
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
|
||||
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
|
||||
@@ -6785,6 +6914,7 @@ static void ufshcd_err_handler(struct work_struct *work)
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
ufshcd_print_host_state(hba);
|
||||
ufshcd_print_pwr_info(hba);
|
||||
ufshcd_print_tx_eq_params(hba);
|
||||
ufshcd_print_evt_hist(hba);
|
||||
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
|
||||
ufshcd_print_trs_all(hba, pr_prdt);
|
||||
@@ -6843,14 +6973,13 @@ static void ufshcd_err_handler(struct work_struct *work)
|
||||
* are sent via bsg and/or sysfs.
|
||||
*/
|
||||
down_write(&hba->clk_scaling_lock);
|
||||
hba->force_pmc = true;
|
||||
pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
|
||||
pmc_err = ufshcd_config_pwr_mode(hba, &hba->pwr_info,
|
||||
UFSHCD_PMC_POLICY_FORCE);
|
||||
if (pmc_err) {
|
||||
needs_reset = true;
|
||||
dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
|
||||
__func__, pmc_err);
|
||||
}
|
||||
hba->force_pmc = false;
|
||||
ufshcd_print_pwr_info(hba);
|
||||
up_write(&hba->clk_scaling_lock);
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
@@ -6976,10 +7105,19 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
|
||||
}
|
||||
|
||||
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
|
||||
if ((reg & UIC_DME_ERROR) &&
|
||||
(reg & UIC_DME_ERROR_CODE_MASK)) {
|
||||
if (reg & UIC_DME_ERROR) {
|
||||
ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
|
||||
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
|
||||
|
||||
if (reg & UIC_DME_ERROR_CODE_MASK)
|
||||
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
|
||||
|
||||
if (reg & UIC_DME_QOS_MASK) {
|
||||
atomic_set(&hba->dme_qos_notification,
|
||||
reg & UIC_DME_QOS_MASK);
|
||||
if (hba->dme_qos_sysfs_handle)
|
||||
sysfs_notify_dirent(hba->dme_qos_sysfs_handle);
|
||||
}
|
||||
|
||||
retval |= IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -7049,6 +7187,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
|
||||
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
|
||||
"host_regs: ");
|
||||
ufshcd_print_pwr_info(hba);
|
||||
ufshcd_print_tx_eq_params(hba);
|
||||
}
|
||||
ufshcd_schedule_eh_work(hba);
|
||||
retval |= IRQ_HANDLED;
|
||||
@@ -7097,16 +7236,17 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
|
||||
/**
|
||||
* ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
|
||||
* @hba: per adapter instance
|
||||
* @reset_iag: true, to reset MCQ IAG counter and timer of the CQ
|
||||
*
|
||||
* Return: IRQ_HANDLED if interrupt is handled.
|
||||
*/
|
||||
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
|
||||
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba, bool reset_iag)
|
||||
{
|
||||
struct ufs_hw_queue *hwq;
|
||||
unsigned long outstanding_cqs;
|
||||
unsigned int nr_queues;
|
||||
int i, ret;
|
||||
u32 events;
|
||||
u32 events, reg;
|
||||
|
||||
ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
|
||||
if (ret)
|
||||
@@ -7121,6 +7261,12 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
|
||||
if (events)
|
||||
ufshcd_mcq_write_cqis(hba, events, i);
|
||||
|
||||
if (reset_iag) {
|
||||
reg = ufshcd_mcq_read_mcqiacr(hba, i);
|
||||
reg |= INT_AGGR_COUNTER_AND_TIMER_RESET;
|
||||
ufshcd_mcq_write_mcqiacr(hba, reg, i);
|
||||
}
|
||||
|
||||
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
|
||||
ufshcd_mcq_poll_cqe_lock(hba, hwq);
|
||||
}
|
||||
@@ -7154,7 +7300,10 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
|
||||
retval |= ufshcd_transfer_req_compl(hba);
|
||||
|
||||
if (intr_status & MCQ_CQ_EVENT_STATUS)
|
||||
retval |= ufshcd_handle_mcq_cq_events(hba);
|
||||
retval |= ufshcd_handle_mcq_cq_events(hba, false);
|
||||
|
||||
if (intr_status & MCQ_IAG_EVENT_STATUS)
|
||||
retval |= ufshcd_handle_mcq_cq_events(hba, true);
|
||||
|
||||
return retval;
|
||||
}
|
||||
@@ -7222,8 +7371,12 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
|
||||
struct ufs_hba *hba = __hba;
|
||||
u32 intr_status, enabled_intr_status;
|
||||
|
||||
/* Move interrupt handling to thread when MCQ & ESI are not enabled */
|
||||
if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
|
||||
/*
|
||||
* Handle interrupt in thread if MCQ or ESI is disabled,
|
||||
* and no active UIC command.
|
||||
*/
|
||||
if ((!hba->mcq_enabled || !hba->mcq_esi_enabled) &&
|
||||
!hba->active_uic_cmd)
|
||||
return IRQ_WAKE_THREAD;
|
||||
|
||||
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
|
||||
@@ -7830,6 +7983,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
||||
ufshcd_print_evt_hist(hba);
|
||||
ufshcd_print_host_state(hba);
|
||||
ufshcd_print_pwr_info(hba);
|
||||
ufshcd_print_tx_eq_params(hba);
|
||||
ufshcd_print_tr(hba, cmd, true);
|
||||
} else {
|
||||
ufshcd_print_tr(hba, cmd, false);
|
||||
@@ -8807,6 +8961,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
|
||||
|
||||
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
|
||||
ufshcd_quirk_override_pa_h8time(hba);
|
||||
|
||||
ufshcd_apply_valid_tx_eq_settings(hba);
|
||||
}
|
||||
|
||||
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
|
||||
@@ -9111,6 +9267,12 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
|
||||
|
||||
/* UFS device is also active now */
|
||||
ufshcd_set_ufs_dev_active(hba);
|
||||
|
||||
/* Indicate that DME QoS Monitor has been reset */
|
||||
atomic_set(&hba->dme_qos_notification, 0x1);
|
||||
if (hba->dme_qos_sysfs_handle)
|
||||
sysfs_notify_dirent(hba->dme_qos_sysfs_handle);
|
||||
|
||||
ufshcd_force_reset_auto_bkops(hba);
|
||||
|
||||
ufshcd_set_timestamp_attr(hba);
|
||||
@@ -9125,7 +9287,8 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
|
||||
if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
|
||||
ufshcd_set_dev_ref_clk(hba);
|
||||
/* Gear up to HS gear. */
|
||||
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
|
||||
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info,
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
|
||||
__func__, ret);
|
||||
@@ -9743,6 +9906,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
|
||||
hba->is_powered = false;
|
||||
ufs_put_device_desc(hba);
|
||||
}
|
||||
sysfs_put(hba->dme_qos_sysfs_handle);
|
||||
}
|
||||
|
||||
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
|
||||
@@ -9942,11 +10106,13 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
|
||||
#ifdef CONFIG_PM
|
||||
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
|
||||
{
|
||||
bool vcc_on = false;
|
||||
int ret = 0;
|
||||
|
||||
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
|
||||
!hba->dev_info.is_lu_power_on_wp) {
|
||||
ret = ufshcd_setup_vreg(hba, true);
|
||||
vcc_on = true;
|
||||
} else if (!ufshcd_is_ufs_dev_active(hba)) {
|
||||
if (!ufshcd_is_link_active(hba)) {
|
||||
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
|
||||
@@ -9957,6 +10123,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
|
||||
goto vccq_lpm;
|
||||
}
|
||||
ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
|
||||
vcc_on = true;
|
||||
}
|
||||
goto out;
|
||||
|
||||
@@ -9965,6 +10132,15 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
|
||||
vcc_disable:
|
||||
ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
|
||||
out:
|
||||
/*
|
||||
* On platforms with a slow VCC ramp-up, a delay is needed after
|
||||
* turning on VCC to ensure the voltage is stable before the
|
||||
* reference clock is enabled.
|
||||
*/
|
||||
if (hba->quirks & UFSHCD_QUIRK_VCC_ON_DELAY && !ret && vcc_on &&
|
||||
hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on)
|
||||
usleep_range(1000, 1100);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_PM */
|
||||
@@ -11070,6 +11246,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
|
||||
goto out_disable;
|
||||
|
||||
ufs_sysfs_add_nodes(hba->dev);
|
||||
hba->dme_qos_sysfs_handle = sysfs_get_dirent(hba->dev->kobj.sd,
|
||||
"dme_qos_notification");
|
||||
async_schedule(ufshcd_async_scan, hba);
|
||||
|
||||
device_enable_async_suspend(dev);
|
||||
|
||||
@@ -443,7 +443,6 @@ static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_
|
||||
}
|
||||
|
||||
static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
|
||||
@@ -451,8 +450,6 @@ static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_ch
|
||||
int ret = 0;
|
||||
|
||||
if (status == PRE_CHANGE) {
|
||||
memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
|
||||
|
||||
/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
|
||||
if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
|
||||
!host->ctlecompval1) {
|
||||
|
||||
@@ -818,12 +818,10 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
|
||||
}
|
||||
|
||||
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
|
||||
struct phy *generic_phy = ufs->phy;
|
||||
struct ufs_host_params host_params;
|
||||
int ret;
|
||||
|
||||
if (!dev_req_params) {
|
||||
@@ -832,18 +830,6 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
|
||||
goto out;
|
||||
}
|
||||
|
||||
ufshcd_init_host_params(&host_params);
|
||||
|
||||
/* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */
|
||||
host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
|
||||
host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
|
||||
|
||||
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
if (ret) {
|
||||
pr_err("%s: failed to determine capabilities\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ufs->drv_data->pre_pwr_change)
|
||||
ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
|
||||
|
||||
@@ -1677,17 +1663,30 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int exynos_ufs_negotiate_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_host_params host_params;
|
||||
|
||||
ufshcd_init_host_params(&host_params);
|
||||
|
||||
/* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */
|
||||
host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
|
||||
host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
|
||||
|
||||
return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
}
|
||||
|
||||
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
switch (status) {
|
||||
case PRE_CHANGE:
|
||||
ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
|
||||
dev_req_params);
|
||||
ret = exynos_ufs_pre_pwr_mode(hba, dev_req_params);
|
||||
break;
|
||||
case POST_CHANGE:
|
||||
ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
|
||||
@@ -2015,6 +2014,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
|
||||
.exit = exynos_ufs_exit,
|
||||
.hce_enable_notify = exynos_ufs_hce_enable_notify,
|
||||
.link_startup_notify = exynos_ufs_link_startup_notify,
|
||||
.negotiate_pwr_mode = exynos_ufs_negotiate_pwr_mode,
|
||||
.pwr_change_notify = exynos_ufs_pwr_change_notify,
|
||||
.setup_clocks = exynos_ufs_setup_clocks,
|
||||
.setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
|
||||
|
||||
@@ -298,6 +298,17 @@ static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
|
||||
ufshcd_init_host_params(host_params);
|
||||
}
|
||||
|
||||
static int ufs_hisi_negotiate_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_host_params host_params;
|
||||
|
||||
ufs_hisi_set_dev_cap(&host_params);
|
||||
|
||||
return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
}
|
||||
|
||||
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
|
||||
{
|
||||
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
|
||||
@@ -362,10 +373,8 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
|
||||
|
||||
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_host_params host_params;
|
||||
int ret = 0;
|
||||
|
||||
if (!dev_req_params) {
|
||||
@@ -377,14 +386,6 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
|
||||
|
||||
switch (status) {
|
||||
case PRE_CHANGE:
|
||||
ufs_hisi_set_dev_cap(&host_params);
|
||||
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
if (ret) {
|
||||
dev_err(hba->dev,
|
||||
"%s: failed to determine capabilities\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ufs_hisi_pwr_change_pre_change(hba);
|
||||
break;
|
||||
case POST_CHANGE:
|
||||
@@ -543,6 +544,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
|
||||
.name = "hi3660",
|
||||
.init = ufs_hi3660_init,
|
||||
.link_startup_notify = ufs_hisi_link_startup_notify,
|
||||
.negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
|
||||
.pwr_change_notify = ufs_hisi_pwr_change_notify,
|
||||
.suspend = ufs_hisi_suspend,
|
||||
.resume = ufs_hisi_resume,
|
||||
@@ -552,6 +554,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
|
||||
.name = "hi3670",
|
||||
.init = ufs_hi3670_init,
|
||||
.link_startup_notify = ufs_hisi_link_startup_notify,
|
||||
.negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
|
||||
.pwr_change_notify = ufs_hisi_pwr_change_notify,
|
||||
.suspend = ufs_hisi_suspend,
|
||||
.resume = ufs_hisi_resume,
|
||||
|
||||
@@ -1317,6 +1317,23 @@ static int ufs_mtk_init(struct ufs_hba *hba)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ufs_mtk_negotiate_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_host_params host_params;
|
||||
|
||||
ufshcd_init_host_params(&host_params);
|
||||
host_params.hs_rx_gear = UFS_HS_G5;
|
||||
host_params.hs_tx_gear = UFS_HS_G5;
|
||||
|
||||
if (dev_max_params->pwr_rx == SLOW_MODE ||
|
||||
dev_max_params->pwr_tx == SLOW_MODE)
|
||||
host_params.desired_working_mode = UFS_PWM_MODE;
|
||||
|
||||
return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
}
|
||||
|
||||
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
@@ -1372,26 +1389,10 @@ static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
|
||||
}
|
||||
|
||||
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
|
||||
struct ufs_host_params host_params;
|
||||
int ret;
|
||||
|
||||
ufshcd_init_host_params(&host_params);
|
||||
host_params.hs_rx_gear = UFS_HS_G5;
|
||||
host_params.hs_tx_gear = UFS_HS_G5;
|
||||
|
||||
if (dev_max_params->pwr_rx == SLOW_MODE ||
|
||||
dev_max_params->pwr_tx == SLOW_MODE)
|
||||
host_params.desired_working_mode = UFS_PWM_MODE;
|
||||
|
||||
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
|
||||
if (ret) {
|
||||
pr_info("%s: failed to determine capabilities\n",
|
||||
__func__);
|
||||
}
|
||||
int ret = 0;
|
||||
|
||||
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
|
||||
ufs_mtk_adjust_sync_length(hba);
|
||||
@@ -1503,7 +1504,6 @@ static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
|
||||
|
||||
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status stage,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
int ret = 0;
|
||||
@@ -1515,8 +1515,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
|
||||
reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
|
||||
ufs_mtk_auto_hibern8_disable(hba);
|
||||
}
|
||||
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
|
||||
dev_req_params);
|
||||
ret = ufs_mtk_pre_pwr_change(hba, dev_req_params);
|
||||
break;
|
||||
case POST_CHANGE:
|
||||
if (ufshcd_is_auto_hibern8_supported(hba))
|
||||
@@ -1960,6 +1959,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
|
||||
|
||||
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
|
||||
{
|
||||
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
|
||||
|
||||
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
|
||||
|
||||
if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
|
||||
@@ -1971,6 +1972,15 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
|
||||
hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a delay after enabling UFS5 VCC to ensure the voltage
|
||||
* is stable before the refclk is enabled.
|
||||
*/
|
||||
if (hba->dev_info.wspecversion >= 0x0500 &&
|
||||
(host->ip_ver == IP_VER_MT6995_A0 ||
|
||||
host->ip_ver == IP_VER_MT6995_B0))
|
||||
hba->quirks |= UFSHCD_QUIRK_VCC_ON_DELAY;
|
||||
|
||||
ufs_mtk_vreg_fix_vcc(hba);
|
||||
ufs_mtk_vreg_fix_vccqx(hba);
|
||||
ufs_mtk_fix_ahit(hba);
|
||||
@@ -2318,6 +2328,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
|
||||
.setup_clocks = ufs_mtk_setup_clocks,
|
||||
.hce_enable_notify = ufs_mtk_hce_enable_notify,
|
||||
.link_startup_notify = ufs_mtk_link_startup_notify,
|
||||
.negotiate_pwr_mode = ufs_mtk_negotiate_pwr_mode,
|
||||
.pwr_change_notify = ufs_mtk_pwr_change_notify,
|
||||
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
|
||||
.fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
|
||||
|
||||
@@ -220,6 +220,10 @@ enum {
|
||||
IP_VER_MT6991_B0 = 0x10470000,
|
||||
IP_VER_MT6993 = 0x10480000,
|
||||
|
||||
/* UFSHCI 5.0 */
|
||||
IP_VER_MT6995_A0 = 0x10490000,
|
||||
IP_VER_MT6995_B0 = 0x10500000,
|
||||
|
||||
IP_VER_NONE = 0xFFFFFFFF
|
||||
};
|
||||
|
||||
|
||||
@@ -966,13 +966,21 @@ static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_l
|
||||
}
|
||||
}
|
||||
|
||||
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
static int ufs_qcom_negotiate_pwr_mode(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
struct ufs_host_params *host_params = &host->host_params;
|
||||
|
||||
return ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
|
||||
}
|
||||
|
||||
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
int ret = 0;
|
||||
|
||||
if (!dev_req_params) {
|
||||
@@ -982,13 +990,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
|
||||
|
||||
switch (status) {
|
||||
case PRE_CHANGE:
|
||||
ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: failed to determine capabilities\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* During UFS driver probe, always update the PHY gear to match the negotiated
|
||||
* gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
|
||||
@@ -1068,10 +1069,188 @@ static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
|
||||
dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
|
||||
}
|
||||
|
||||
/**
|
||||
* ufs_qcom_double_t_adapt_l0l1l2l3 - Create a new adapt that doubles the
|
||||
* adaptation duration TADAPT_L0_L1_L2_L3 derived from the old adapt.
|
||||
*
|
||||
* @old_adapt: Original ADAPT_L0_L1_L2_L3 capability
|
||||
*
|
||||
* ADAPT_length_L0_L1_L2_L3 formula from M-PHY spec:
|
||||
* if (ADAPT_range_L0_L1_L2_L3 == COARSE) {
|
||||
* ADAPT_length_L0_L1_L2_L3 = [0, 12]
|
||||
* ADAPT_L0_L1_L2_L3 = 215 x 2^ADAPT_length_L0_L1_L2_L3
|
||||
* } else if (ADAPT_range_L0_L1_L2_L3 == FINE) {
|
||||
* ADAPT_length_L0_L1_L2_L3 = [0, 127]
|
||||
* TADAPT_L0_L1_L2_L3 = 215 x (ADAPT_length_L0_L1_L2_L3 + 1)
|
||||
* }
|
||||
*
|
||||
* To double the adaptation duration TADAPT_L0_L1_L2_L3:
|
||||
* 1. If adapt range is COARSE (1'b1), new adapt = old adapt + 1.
|
||||
* 2. If adapt range is FINE (1'b0):
|
||||
* a) If old adapt length is < 64, (new adapt + 1) = 2 * (old adapt + 1).
|
||||
* b) If old adapt length is >= 64, set new adapt to 0x88 using COARSE
|
||||
* range, because new adapt get from equation in a) shall exceed 127.
|
||||
*
|
||||
* Examples:
|
||||
* ADAPT_range_L0_L1_L2_L3 | ADAPT_length_L0_L1_L2_L3 | TADAPT_L0_L1_L2_L3 (PAM-4 UI)
|
||||
* 0 3 131072
|
||||
* 0 7 262144
|
||||
* 0 63 2097152
|
||||
* 0 64 2129920
|
||||
* 0 127 4194304
|
||||
* 1 8 8388608
|
||||
* 1 9 16777216
|
||||
* 1 10 33554432
|
||||
* 1 11 67108864
|
||||
* 1 12 134217728
|
||||
*
|
||||
* Return: new adapt.
|
||||
*/
|
||||
static u32 ufs_qcom_double_t_adapt_l0l1l2l3(u32 old_adapt)
|
||||
{
|
||||
u32 adapt_length = old_adapt & ADAPT_LENGTH_MASK;
|
||||
u32 new_adapt;
|
||||
|
||||
if (IS_ADAPT_RANGE_COARSE(old_adapt)) {
|
||||
new_adapt = (adapt_length + 1) | ADAPT_RANGE_BIT;
|
||||
} else {
|
||||
if (adapt_length < 64)
|
||||
new_adapt = (adapt_length << 1) + 1;
|
||||
else
|
||||
/*
|
||||
* 0x88 is the very coarse Adapt value which is two
|
||||
* times of the largest fine Adapt value (0x7F)
|
||||
*/
|
||||
new_adapt = 0x88;
|
||||
}
|
||||
|
||||
return new_adapt;
|
||||
}
|
||||
|
||||
static void ufs_qcom_limit_max_gear(struct ufs_hba *hba,
|
||||
enum ufs_hs_gear_tag gear)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
|
||||
struct ufs_host_params *host_params = &host->host_params;
|
||||
|
||||
host_params->hs_tx_gear = gear;
|
||||
host_params->hs_rx_gear = gear;
|
||||
pwr_info->gear_tx = gear;
|
||||
pwr_info->gear_rx = gear;
|
||||
|
||||
dev_warn(hba->dev, "Limited max gear of host and device to HS-G%d\n", gear);
|
||||
}
|
||||
|
||||
static void ufs_qcom_fixup_tx_adapt_l0l1l2l3(struct ufs_hba *hba)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
|
||||
struct ufs_host_params *host_params = &host->host_params;
|
||||
u32 old_adapt, new_adapt, actual_adapt;
|
||||
bool limit_speed = false;
|
||||
int err;
|
||||
|
||||
if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
|
||||
host_params->hs_tx_gear <= UFS_HS_G5 ||
|
||||
pwr_info->gear_tx <= UFS_HS_G5)
|
||||
return;
|
||||
|
||||
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), &old_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
|
||||
dev_err(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 value (0x%x) exceeds MAX\n",
|
||||
old_adapt);
|
||||
err = -ERANGE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
|
||||
dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptInitialL0L1L2L3 = 0x%x, new value = 0x%x\n",
|
||||
old_adapt, new_adapt);
|
||||
|
||||
/*
|
||||
* 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
|
||||
* can accept 0x8D but some cannot.
|
||||
*/
|
||||
if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
|
||||
(new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
|
||||
err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
|
||||
new_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
|
||||
&actual_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (actual_adapt != new_adapt) {
|
||||
limit_speed = true;
|
||||
dev_warn(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 0x%x, expect 0x%x\n",
|
||||
actual_adapt, new_adapt);
|
||||
}
|
||||
} else {
|
||||
limit_speed = true;
|
||||
dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptInitialL0L1L2L3 (0x%x) is too large!\n",
|
||||
new_adapt);
|
||||
}
|
||||
|
||||
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), &old_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
|
||||
dev_err(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 value (0x%x) exceeds MAX\n",
|
||||
old_adapt);
|
||||
err = -ERANGE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
|
||||
dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptRefreshL0L1L2L3 = 0x%x, new value = 0x%x\n",
|
||||
old_adapt, new_adapt);
|
||||
|
||||
/*
|
||||
* 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
|
||||
* can accept 0x8D but some cannot.
|
||||
*/
|
||||
if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
|
||||
(new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
|
||||
err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
|
||||
new_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
|
||||
&actual_adapt);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (actual_adapt != new_adapt) {
|
||||
limit_speed = true;
|
||||
dev_warn(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 0x%x, expect 0x%x\n",
|
||||
new_adapt, actual_adapt);
|
||||
}
|
||||
} else {
|
||||
limit_speed = true;
|
||||
dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptRefreshL0L1L2L3 (0x%x) is too large!\n",
|
||||
new_adapt);
|
||||
}
|
||||
|
||||
out:
|
||||
if (limit_speed || err)
|
||||
ufs_qcom_limit_max_gear(hba, UFS_HS_G5);
|
||||
}
|
||||
|
||||
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
ufs_qcom_fixup_tx_adapt_l0l1l2l3(hba);
|
||||
|
||||
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
|
||||
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
|
||||
|
||||
@@ -1205,6 +1384,8 @@ static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
|
||||
|
||||
static void ufs_qcom_set_caps(struct ufs_hba *hba)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
|
||||
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
|
||||
hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
|
||||
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
|
||||
@@ -1212,6 +1393,9 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
|
||||
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
|
||||
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
|
||||
|
||||
if (host->hw_ver.major >= 0x7)
|
||||
hba->caps |= UFSHCD_CAP_TX_EQUALIZATION;
|
||||
|
||||
ufs_qcom_set_host_caps(hba);
|
||||
}
|
||||
|
||||
@@ -2326,6 +2510,387 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
|
||||
return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
|
||||
}
|
||||
|
||||
static int ufs_qcom_host_eom_config(struct ufs_hba *hba, int lane,
|
||||
const struct ufs_eom_coord *eom_coord,
|
||||
u32 target_test_count)
|
||||
{
|
||||
enum ufs_eom_eye_mask eye_mask = eom_coord->eye_mask;
|
||||
int v_step = eom_coord->v_step;
|
||||
int t_step = eom_coord->t_step;
|
||||
u32 volt_step, timing_step;
|
||||
int ret;
|
||||
|
||||
if (abs(v_step) > UFS_QCOM_EOM_VOLTAGE_STEPS_MAX) {
|
||||
dev_err(hba->dev, "Invalid EOM Voltage Step: %d\n", v_step);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
if (abs(t_step) > UFS_QCOM_EOM_TIMING_STEPS_MAX) {
|
||||
dev_err(hba->dev, "Invalid EOM Timing Step: %d\n", t_step);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
if (v_step < 0)
|
||||
volt_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-v_step);
|
||||
else
|
||||
volt_step = (u32)v_step;
|
||||
|
||||
if (t_step < 0)
|
||||
timing_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-t_step);
|
||||
else
|
||||
timing_step = (u32)t_step;
|
||||
|
||||
ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
|
||||
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
|
||||
BIT(eye_mask) | RX_EYEMON_EXTENDED_VRANGE_BIT);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "Failed to enable Host EOM on Lane %d: %d\n",
|
||||
lane, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TIMING_STEPS,
|
||||
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
|
||||
timing_step);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "Failed to set Host EOM timing step on Lane %d: %d\n",
|
||||
lane, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_VOLTAGE_STEPS,
|
||||
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
|
||||
volt_step);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "Failed to set Host EOM voltage step on Lane %d: %d\n",
|
||||
lane, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TARGET_TEST_COUNT,
|
||||
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
|
||||
target_test_count);
|
||||
if (ret)
|
||||
dev_err(hba->dev, "Failed to set Host EOM target test count on Lane %d: %d\n",
|
||||
lane, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * ufs_qcom_host_eom_may_stop - poll one Host RX lane and stop EOM when done
 * @hba: per-adapter instance
 * @lane: RX lane index to poll
 * @target_test_count: number of EOM test iterations that was requested
 * @err_count: out-parameter; receives the lane's EOM error count on success
 *
 * Reads the lane's Eye Opening Monitor status attributes over DME. Returns
 * 0 once the EOM run on @lane has completed (the monitor is then disabled
 * and *err_count is valid), -EAGAIN when the lane needs more link bursts,
 * or a negative error code on a DME access failure.
 */
static int ufs_qcom_host_eom_may_stop(struct ufs_hba *hba, int lane,
				      u32 target_test_count, u32 *err_count)
{
	u32 start, tested_count, error_count;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_START,
				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
				&start);
	if (ret) {
		dev_err(hba->dev, "Failed to get Host EOM start status on Lane %d: %d\n",
			lane, ret);
		return ret;
	}

	/* Bit 0 of RX_EYEMON_START set: the monitor is still running. */
	if (start & 0x1)
		return -EAGAIN;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TESTED_COUNT,
				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
				&tested_count);
	if (ret) {
		dev_err(hba->dev, "Failed to get Host EOM tested count on Lane %d: %d\n",
			lane, ret);
		return ret;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ERROR_COUNT,
				UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
				&error_count);
	if (ret) {
		dev_err(hba->dev, "Failed to get Host EOM error count on Lane %d: %d\n",
			lane, ret);
		return ret;
	}

	/*
	 * EOM can stop: either enough iterations ran, or at least one error
	 * was observed at this eye coordinate.
	 * NOTE(review): the "- 3" slack on the tested count presumably allows
	 * for hardware counting granularity — TODO confirm against the PHY
	 * programming guide.
	 */
	if ((tested_count >= target_test_count - 3) || error_count > 0) {
		*err_count = error_count;

		/* Disable EOM */
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
					0x0);
		if (ret) {
			dev_err(hba->dev, "Failed to disable Host EOM on Lane %d: %d\n",
				lane, ret);
			return ret;
		}
	} else {
		return -EAGAIN;
	}

	return 0;
}
|
||||
|
||||
/*
 * ufs_qcom_host_eom_scan - run one Host RX EOM scan at a given eye coordinate
 * @hba: per-adapter instance
 * @num_lanes: number of RX lanes to scan
 * @eom_coord: the (t_step, v_step, eye_mask) eye coordinate to sample
 * @target_test_count: number of EOM test iterations to request per lane
 * @err_count: per-lane array receiving each lane's EOM error count
 *
 * Configures EOM on every lane, kicks it off with a dummy power-mode request,
 * then generates link bursts and polls each lane until all lanes have
 * stopped. Returns 0 on success, negative error code otherwise.
 *
 * NOTE(review): the more_burst loop has no iteration bound or timeout; if a
 * lane's EOM never completes this spins indefinitely — confirm the hardware
 * guarantees completion.
 */
static int ufs_qcom_host_eom_scan(struct ufs_hba *hba, int num_lanes,
				  const struct ufs_eom_coord *eom_coord,
				  u32 target_test_count, u32 *err_count)
{
	bool eom_stopped[PA_MAXDATALANES] = { 0 };
	int lane, ret;
	u32 setting;

	if (!err_count || !eom_coord)
		return -EINVAL;

	if (target_test_count < UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN) {
		dev_err(hba->dev, "Target test count (%u) too small for Host EOM\n",
			target_test_count);
		return -ERANGE;
	}

	for (lane = 0; lane < num_lanes; lane++) {
		ret = ufs_qcom_host_eom_config(hba, lane, eom_coord,
					       target_test_count);
		if (ret) {
			dev_err(hba->dev, "Failed to config Host RX EOM: %d\n", ret);
			return ret;
		}
	}

	/*
	 * Trigger a PACP_PWR_req to kick start EOM, but not to really change
	 * the Power Mode.
	 */
	ret = ufshcd_uic_change_pwr_mode(hba, FAST_MODE << 4 | FAST_MODE);
	if (ret) {
		dev_err(hba->dev, "Failed to change power mode to kick start Host EOM: %d\n",
			ret);
		return ret;
	}

more_burst:
	/*
	 * Create burst on Host RX Lane. The attribute value itself is
	 * irrelevant; the peer DME_GET is issued purely to generate link
	 * traffic, so its return value is deliberately ignored.
	 */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &setting);

	for (lane = 0; lane < num_lanes; lane++) {
		/* Lanes that already finished are skipped on re-entry. */
		if (eom_stopped[lane])
			continue;

		ret = ufs_qcom_host_eom_may_stop(hba, lane, target_test_count,
						 &err_count[lane]);
		if (!ret) {
			eom_stopped[lane] = true;
		} else if (ret == -EAGAIN) {
			/* Need more burst to exercise EOM */
			goto more_burst;
		} else {
			dev_err(hba->dev, "Failed to stop Host EOM: %d\n", ret);
			return ret;
		}

		dev_dbg(hba->dev, "Host RX Lane %d EOM, v_step %d, t_step %d, error count %u\n",
			lane, eom_coord->v_step, eom_coord->t_step,
			err_count[lane]);
	}

	return 0;
}
|
||||
|
||||
/*
 * ufs_qcom_host_sw_rx_fom - compute a software RX Figure-of-Merit per lane
 * @hba: per-adapter instance
 * @num_lanes: number of RX lanes to measure
 * @fom: per-lane accumulator for the FOM score; caller must pre-zero it,
 *       as this function only adds weights to the existing values
 *
 * Sweeps the predefined G6 eye-coordinate table, running one EOM scan per
 * coordinate, and credits SW_RX_FOM_EOM_COORDS_WEIGHT to each lane that
 * sampled the coordinate error-free. Auto-hibernate is suspended for the
 * duration of the sweep and restored on exit. Returns 0 on success,
 * negative error code otherwise.
 */
static int ufs_qcom_host_sw_rx_fom(struct ufs_hba *hba, int num_lanes, u32 *fom)
{
	const struct ufs_eom_coord *eom_coord = sw_rx_fom_eom_coords_g6;
	u32 eom_err_count[PA_MAXDATALANES] = { 0 };
	u32 curr_ahit;
	int lane, i, ret;

	if (!fom)
		return -EINVAL;

	/* Stop the auto hibernate idle timer so the link stays active. */
	curr_ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	if (curr_ahit)
		ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* Adaptation would disturb the eye measurement, so disable it. */
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT);
	if (ret) {
		dev_err(hba->dev, "Failed to select NO_ADAPT before starting Host EOM: %d\n", ret);
		goto out;
	}

	for (i = 0; i < SW_RX_FOM_EOM_COORDS; i++, eom_coord++) {
		ret = ufs_qcom_host_eom_scan(hba, num_lanes, eom_coord,
					     UFS_QCOM_EOM_TARGET_TEST_COUNT_G6,
					     eom_err_count);
		if (ret) {
			dev_err(hba->dev, "Failed to run Host EOM scan: %d\n", ret);
			break;
		}

		for (lane = 0; lane < num_lanes; lane++) {
			/* Bad coordinates have no weights */
			if (eom_err_count[lane])
				continue;
			fom[lane] += SW_RX_FOM_EOM_COORDS_WEIGHT;
		}
	}

out:
	/* Restore the auto hibernate idle timer */
	if (curr_ahit)
		ufshcd_writel(hba, curr_ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);

	return ret;
}
|
||||
|
||||
static int ufs_qcom_get_rx_fom(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter)
|
||||
{
|
||||
struct ufshcd_tx_eq_params *params __free(kfree) =
|
||||
kzalloc(sizeof(*params), GFP_KERNEL);
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
struct ufs_pa_layer_attr old_pwr_info;
|
||||
u32 fom[PA_MAXDATALANES] = { 0 };
|
||||
u32 gear = pwr_mode->gear_tx;
|
||||
u32 rate = pwr_mode->hs_rate;
|
||||
int lane, ret;
|
||||
|
||||
if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
|
||||
gear <= UFS_HS_G5 || !d_iter || !d_iter->is_updated)
|
||||
return 0;
|
||||
|
||||
if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX)
|
||||
return -ERANGE;
|
||||
|
||||
if (!params)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
|
||||
|
||||
memcpy(params, &hba->tx_eq_params[gear - 1], sizeof(struct ufshcd_tx_eq_params));
|
||||
for (lane = 0; lane < pwr_mode->lane_rx; lane++) {
|
||||
params->device[lane].preshoot = d_iter->preshoot;
|
||||
params->device[lane].deemphasis = d_iter->deemphasis;
|
||||
}
|
||||
|
||||
/* Use TX EQTR settings as Device's TX Equalization settings. */
|
||||
ret = ufshcd_apply_tx_eq_settings(hba, params, gear);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
|
||||
__func__, gear, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Force PMC to target HS Gear to use new TX Equalization settings. */
|
||||
ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_FORCE);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: Failed to change power mode to HS-G%u, Rate-%s: %d\n",
|
||||
__func__, gear, ufs_hs_rate_to_str(rate), ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ufs_qcom_host_sw_rx_fom(hba, pwr_mode->lane_rx, fom);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "Failed to get SW FOM of TX (PreShoot: %u, DeEmphasis: %u): %d\n",
|
||||
d_iter->preshoot, d_iter->deemphasis, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Restore Device's TX Equalization settings. */
|
||||
ret = ufshcd_apply_tx_eq_settings(hba, &hba->tx_eq_params[gear - 1], gear);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
|
||||
__func__, gear, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Restore Power Mode. */
|
||||
ret = ufshcd_change_power_mode(hba, &old_pwr_info, UFSHCD_PMC_POLICY_FORCE);
|
||||
if (ret) {
|
||||
dev_err(hba->dev, "%s: Failed to restore power mode to HS-G%u: %d\n",
|
||||
__func__, old_pwr_info.gear_tx, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (lane = 0; lane < pwr_mode->lane_rx; lane++)
|
||||
d_iter->fom[lane] = fom[lane];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ufs_qcom_apply_tx_eqtr_settings(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter)
|
||||
{
|
||||
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
|
||||
u32 setting = 0;
|
||||
int lane;
|
||||
|
||||
if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
|
||||
return 0;
|
||||
|
||||
for (lane = 0; lane < pwr_mode->lane_tx; lane++) {
|
||||
setting |= TX_HS_PRESHOOT_BITS(lane, h_iter->preshoot);
|
||||
setting |= TX_HS_DEEMPHASIS_BITS(lane, h_iter->deemphasis);
|
||||
}
|
||||
|
||||
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), setting);
|
||||
}
|
||||
|
||||
/*
 * ufs_qcom_tx_eqtr_notify - bracket the TX Equalization Training procedure
 * @hba: per-adapter instance
 * @status: PRE_CHANGE before training starts, POST_CHANGE after it ends
 * @pwr_mode: the power mode the training targets
 *
 * PRE_CHANGE: saves the current PA_TXEQG1SETTING into the host private data
 * and performs a PMC to the target HS gear so training runs at that gear.
 * POST_CHANGE: restores the saved PA_TXEQG1SETTING and PMCs back to HS-G1.
 * Returns 0 on unsupported hardware or on success, negative error code
 * otherwise. PMC failures are logged and propagated via the return value.
 */
static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba,
				   enum ufs_notify_change_status status,
				   struct ufs_pa_layer_attr *pwr_mode)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	/* HS-G1 profile used to park the link after training completes. */
	struct ufs_pa_layer_attr pwr_mode_hs_g1 = {
		.gear_rx = UFS_HS_G1,
		.gear_tx = UFS_HS_G1,
		.lane_rx = pwr_mode->lane_rx,
		.lane_tx = pwr_mode->lane_tx,
		.pwr_rx = FAST_MODE,
		.pwr_tx = FAST_MODE,
		.hs_rate = pwr_mode->hs_rate,
	};
	u32 gear = pwr_mode->gear_tx;
	u32 rate = pwr_mode->hs_rate;
	int ret;

	/* TX EQTR is only implemented on HC v7.0/v7.1. */
	if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
		return 0;

	if (status == PRE_CHANGE) {
		/* Save the G1 setting so POST_CHANGE can restore it. */
		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
				     &host->saved_tx_eq_g1_setting);
		if (ret)
			return ret;

		/* PMC to target HS Gear. */
		ret = ufshcd_change_power_mode(hba, pwr_mode,
					       UFSHCD_PMC_POLICY_DONT_FORCE);
		if (ret)
			dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n",
				__func__, gear, ufs_hs_rate_to_str(rate), ret);
	} else {
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
				     host->saved_tx_eq_g1_setting);
		if (ret)
			return ret;

		/* PMC back to HS-G1. */
		ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1,
					       UFSHCD_PMC_POLICY_DONT_FORCE);
		if (ret)
			dev_err(hba->dev, "%s: Failed to PMC to HS-G1, Rate-%s: %d\n",
				__func__, ufs_hs_rate_to_str(rate), ret);
	}

	return ret;
}
|
||||
|
||||
/*
|
||||
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
|
||||
*
|
||||
@@ -2341,6 +2906,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
|
||||
.setup_clocks = ufs_qcom_setup_clocks,
|
||||
.hce_enable_notify = ufs_qcom_hce_enable_notify,
|
||||
.link_startup_notify = ufs_qcom_link_startup_notify,
|
||||
.negotiate_pwr_mode = ufs_qcom_negotiate_pwr_mode,
|
||||
.pwr_change_notify = ufs_qcom_pwr_change_notify,
|
||||
.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
|
||||
.fixup_dev_quirks = ufs_qcom_fixup_dev_quirks,
|
||||
@@ -2355,6 +2921,9 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
|
||||
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
|
||||
.config_esi = ufs_qcom_config_esi,
|
||||
.freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
|
||||
.get_rx_fom = ufs_qcom_get_rx_fom,
|
||||
.apply_tx_eqtr_settings = ufs_qcom_apply_tx_eqtr_settings,
|
||||
.tx_eqtr_notify = ufs_qcom_tx_eqtr_notify,
|
||||
};
|
||||
|
||||
static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = {
|
||||
|
||||
@@ -33,6 +33,46 @@
|
||||
#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
|
||||
#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
|
||||
|
||||
#define UFS_QCOM_EOM_VOLTAGE_STEPS_MAX 127
|
||||
#define UFS_QCOM_EOM_TIMING_STEPS_MAX 63
|
||||
#define UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN 8
|
||||
#define UFS_QCOM_EOM_TARGET_TEST_COUNT_G6 0x3F
|
||||
|
||||
#define SW_RX_FOM_EOM_COORDS 23
|
||||
#define SW_RX_FOM_EOM_COORDS_WEIGHT (127 / SW_RX_FOM_EOM_COORDS)
|
||||
|
||||
/*
 * struct ufs_eom_coord - one Eye Opening Monitor sampling coordinate
 * @t_step: signed timing offset in EOM timing steps
 * @v_step: signed voltage offset in EOM voltage steps
 * @eye_mask: enum ufs_eom_eye_mask value selecting which eye to sample
 */
struct ufs_eom_coord {
	int t_step;
	int v_step;
	u8 eye_mask;
};
|
||||
|
||||
/*
 * Eye coordinates (t_step, v_step, eye_mask) sampled by the software RX FOM
 * sweep at HS-G6. The 23 points trace a diamond-shaped eye contour; every
 * error-free point contributes SW_RX_FOM_EOM_COORDS_WEIGHT to a lane's FOM.
 */
static const struct ufs_eom_coord sw_rx_fom_eom_coords_g6[SW_RX_FOM_EOM_COORDS] = {
	[0] = { -2, -15, UFS_EOM_EYE_MASK_M },
	[1] = { 0, -15, UFS_EOM_EYE_MASK_M },
	[2] = { 2, -15, UFS_EOM_EYE_MASK_M },
	[3] = { -4, -10, UFS_EOM_EYE_MASK_M },
	[4] = { -2, -10, UFS_EOM_EYE_MASK_M },
	[5] = { 0, -10, UFS_EOM_EYE_MASK_M },
	[6] = { 2, -10, UFS_EOM_EYE_MASK_M },
	[7] = { 4, -10, UFS_EOM_EYE_MASK_M },
	[8] = { -6, 0, UFS_EOM_EYE_MASK_M },
	[9] = { -4, 0, UFS_EOM_EYE_MASK_M },
	[10] = { -2, 0, UFS_EOM_EYE_MASK_M },
	[11] = { 0, 0, UFS_EOM_EYE_MASK_M },
	[12] = { 2, 0, UFS_EOM_EYE_MASK_M },
	[13] = { 4, 0, UFS_EOM_EYE_MASK_M },
	[14] = { 6, 0, UFS_EOM_EYE_MASK_M },
	[15] = { -4, 10, UFS_EOM_EYE_MASK_M },
	[16] = { -2, 10, UFS_EOM_EYE_MASK_M },
	[17] = { 0, 10, UFS_EOM_EYE_MASK_M },
	[18] = { 2, 10, UFS_EOM_EYE_MASK_M },
	[19] = { 4, 10, UFS_EOM_EYE_MASK_M },
	[20] = { -2, 15, UFS_EOM_EYE_MASK_M },
	[21] = { 0, 15, UFS_EOM_EYE_MASK_M },
	[22] = { 2, 15, UFS_EOM_EYE_MASK_M },
};
|
||||
|
||||
/* Qualcomm MCQ Configuration */
|
||||
#define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 in hex */
|
||||
#define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */
|
||||
@@ -308,6 +348,8 @@ struct ufs_qcom_host {
|
||||
u32 phy_gear;
|
||||
|
||||
bool esi_enabled;
|
||||
|
||||
u32 saved_tx_eq_g1_setting;
|
||||
};
|
||||
|
||||
struct ufs_qcom_drvdata {
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
@@ -161,14 +161,11 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
|
||||
|
||||
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
|
||||
|
||||
if (status == PRE_CHANGE) {
|
||||
memcpy(dev_req_params, dev_max_params,
|
||||
sizeof(struct ufs_pa_layer_attr));
|
||||
if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
|
||||
ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
|
||||
PA_INITIAL_ADAPT);
|
||||
|
||||
@@ -145,7 +145,8 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
|
||||
|
||||
pwr_info.lane_rx = lanes;
|
||||
pwr_info.lane_tx = lanes;
|
||||
ret = ufshcd_config_pwr_mode(hba, &pwr_info);
|
||||
ret = ufshcd_change_power_mode(hba, &pwr_info,
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE);
|
||||
if (ret)
|
||||
dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
|
||||
__func__, lanes, ret);
|
||||
@@ -154,17 +155,15 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
|
||||
|
||||
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *dev_max_params,
|
||||
struct ufs_pa_layer_attr *dev_req_params)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
switch (status) {
|
||||
case PRE_CHANGE:
|
||||
if (ufshcd_is_hs_mode(dev_max_params) &&
|
||||
if (ufshcd_is_hs_mode(dev_req_params) &&
|
||||
(hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
|
||||
ufs_intel_set_lanes(hba, 2);
|
||||
memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
|
||||
break;
|
||||
case POST_CHANGE:
|
||||
if (ufshcd_is_hs_mode(dev_req_params)) {
|
||||
@@ -695,6 +694,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
|
||||
{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
|
||||
{ PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
|
||||
{ PCI_VDEVICE(INTEL, 0xD335), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
|
||||
{ } /* terminate list */
|
||||
};
|
||||
|
||||
|
||||
@@ -2030,6 +2030,7 @@ static const struct target_core_fabric_ops usbg_ops = {
|
||||
.tfc_wwn_attrs = usbg_wwn_attrs,
|
||||
.tfc_tpg_base_attrs = usbg_base_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -2950,6 +2950,8 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
|
||||
.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
|
||||
.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.direct_compl_supp = 1,
|
||||
.default_submit_type = TARGET_QUEUE_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -1832,6 +1832,7 @@ static const struct target_core_fabric_ops scsiback_ops = {
|
||||
.tfc_tpg_base_attrs = scsiback_tpg_attrs,
|
||||
.tfc_tpg_param_attrs = scsiback_param_attrs,
|
||||
|
||||
.default_compl_type = TARGET_QUEUE_COMPL,
|
||||
.default_submit_type = TARGET_DIRECT_SUBMIT,
|
||||
.direct_submit_supp = 1,
|
||||
};
|
||||
|
||||
@@ -62,10 +62,6 @@ enum discover_event {
|
||||
|
||||
/* ---------- Expander Devices ---------- */
|
||||
|
||||
#define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj)
|
||||
#define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\
|
||||
attr)
|
||||
|
||||
enum routing_attribute {
|
||||
DIRECT_ROUTING,
|
||||
SUBTRACTIVE_ROUTING,
|
||||
|
||||
@@ -571,6 +571,7 @@ void scsi_put_internal_cmd(struct scsi_cmnd *scmd);
|
||||
extern void sdev_disable_disk_events(struct scsi_device *sdev);
|
||||
extern void sdev_enable_disk_events(struct scsi_device *sdev);
|
||||
extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
|
||||
extern int scsi_vpd_lun_serial(struct scsi_device *, char *, size_t);
|
||||
extern int scsi_vpd_tpg_id(struct scsi_device *, int *);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
@@ -660,6 +660,10 @@ struct Scsi_Host {
|
||||
*/
|
||||
unsigned nr_hw_queues;
|
||||
unsigned nr_maps;
|
||||
|
||||
/* Asynchronous scan in progress */
|
||||
bool async_scan __guarded_by(&scan_mutex);
|
||||
|
||||
unsigned active_mode:2;
|
||||
|
||||
/*
|
||||
@@ -678,9 +682,6 @@ struct Scsi_Host {
|
||||
/* Task mgmt function in progress */
|
||||
unsigned tmf_in_progress:1;
|
||||
|
||||
/* Asynchronous scan in progress */
|
||||
unsigned async_scan:1;
|
||||
|
||||
/* Don't resume host in EH */
|
||||
unsigned eh_noresume:1;
|
||||
|
||||
|
||||
@@ -111,6 +111,15 @@
|
||||
/* Peripheral Device Text Identification Information */
|
||||
#define PD_TEXT_ID_INFO_LEN 256
|
||||
|
||||
enum target_compl_type {
|
||||
/* Use the fabric driver's default completion type */
|
||||
TARGET_FABRIC_DEFAULT_COMPL,
|
||||
/* Complete from the backend calling context */
|
||||
TARGET_DIRECT_COMPL,
|
||||
/* Defer completion to the LIO workqueue */
|
||||
TARGET_QUEUE_COMPL,
|
||||
};
|
||||
|
||||
enum target_submit_type {
|
||||
/* Use the fabric driver's default submission type */
|
||||
TARGET_FABRIC_DEFAULT_SUBMIT,
|
||||
@@ -741,6 +750,7 @@ struct se_dev_attrib {
|
||||
u32 atomic_granularity;
|
||||
u32 atomic_max_with_boundary;
|
||||
u32 atomic_max_boundary;
|
||||
u8 complete_type;
|
||||
u8 submit_type;
|
||||
struct se_device *da_dev;
|
||||
struct config_group da_group;
|
||||
|
||||
@@ -118,15 +118,21 @@ struct target_core_fabric_ops {
|
||||
* its entirety before a command is aborted.
|
||||
*/
|
||||
unsigned int write_pending_must_be_called:1;
|
||||
/*
|
||||
* Set this if the driver does not require calling queue_data_in
|
||||
* queue_status and check_stop_free from a worker thread when
|
||||
* completing successful commands.
|
||||
*/
|
||||
unsigned int direct_compl_supp:1;
|
||||
/*
|
||||
* Set this if the driver supports submitting commands to the backend
|
||||
* from target_submit/target_submit_cmd.
|
||||
*/
|
||||
unsigned int direct_submit_supp:1;
|
||||
/*
|
||||
* Set this to a target_submit_type value.
|
||||
*/
|
||||
/* Set this to a target_submit_type value. */
|
||||
u8 default_submit_type;
|
||||
/* Set this to the target_compl_type value. */
|
||||
u8 default_compl_type;
|
||||
};
|
||||
|
||||
int target_register_template(const struct target_core_fabric_ops *fo);
|
||||
|
||||
@@ -1030,7 +1030,7 @@ struct fc_fn_li_desc {
|
||||
*/
|
||||
__be32 event_count; /* minimum number of event
|
||||
* occurrences during the event
|
||||
* threshold to caause the LI event
|
||||
* threshold to cause the LI event
|
||||
*/
|
||||
__be32 pname_count; /* number of portname_list elements */
|
||||
__be64 pname_list[]; /* list of N_Port_Names accessible
|
||||
|
||||
@@ -287,6 +287,86 @@ struct ufs_pwr_mode_info {
|
||||
struct ufs_pa_layer_attr info;
|
||||
};
|
||||
|
||||
#define UFS_MAX_LANES 2
|
||||
|
||||
/**
|
||||
* struct tx_eqtr_iter - TX Equalization Training iterator
|
||||
* @preshoot_bitmap: PreShoot bitmap
|
||||
* @deemphasis_bitmap: DeEmphasis bitmap
|
||||
* @preshoot: PreShoot value
|
||||
* @deemphasis: DeEmphasis value
|
||||
* @fom: Figure-of-Merit read out from RX_FOM
|
||||
* @is_updated: Flag to indicate if updated since previous iteration
|
||||
*/
|
||||
struct tx_eqtr_iter {
|
||||
unsigned long preshoot_bitmap;
|
||||
unsigned long deemphasis_bitmap;
|
||||
u8 preshoot;
|
||||
u8 deemphasis;
|
||||
u8 fom[UFS_MAX_LANES];
|
||||
bool is_updated;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ufshcd_tx_eq_settings - TX Equalization settings
|
||||
* @preshoot: PreShoot value
|
||||
* @deemphasis: DeEmphasis value
|
||||
* @fom_val: Figure-of-Merit value read out from RX_FOM (Bit[6:0])
|
||||
* @precode_en: Flag to indicate whether need to enable pre-coding
|
||||
*/
|
||||
struct ufshcd_tx_eq_settings {
|
||||
u8 preshoot;
|
||||
u8 deemphasis;
|
||||
u8 fom_val;
|
||||
bool precode_en;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ufshcd_tx_eqtr_data - Data used during TX Equalization Training procedure
|
||||
* @host: Optimal TX EQ settings identified for host TX Lanes during TX EQTR
|
||||
* @device: Optimal TX EQ settings identified for device TX Lanes during TX EQTR
|
||||
* @host_fom: Host TX EQTR FOM record
|
||||
* @device_fom: Device TX EQTR FOM record
|
||||
*/
|
||||
struct ufshcd_tx_eqtr_data {
|
||||
struct ufshcd_tx_eq_settings host[UFS_MAX_LANES];
|
||||
struct ufshcd_tx_eq_settings device[UFS_MAX_LANES];
|
||||
u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
|
||||
u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ufshcd_tx_eqtr_record - TX Equalization Training record
|
||||
* @host_fom: Host TX EQTR FOM record
|
||||
* @device_fom: Device TX EQTR FOM record
|
||||
* @last_record_ts: Timestamp of the most recent TX EQTR record
|
||||
* @last_record_index: Index of the most recent TX EQTR record
|
||||
* @saved_adapt_eqtr: Saved Adaptation length setting for TX EQTR
|
||||
*/
|
||||
struct ufshcd_tx_eqtr_record {
|
||||
u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
|
||||
u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
|
||||
ktime_t last_record_ts;
|
||||
u16 last_record_index;
|
||||
u16 saved_adapt_eqtr;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ufshcd_tx_eq_params - TX Equalization parameters structure
|
||||
* @host: TX EQ settings for host TX Lanes
|
||||
* @device: TX EQ settings for device TX Lanes
|
||||
* @eqtr_record: Pointer to TX EQTR record
|
||||
* @is_valid: True if parameter contains valid TX Equalization settings
|
||||
* @is_applied: True if settings have been applied to UniPro of both sides
|
||||
*/
|
||||
struct ufshcd_tx_eq_params {
|
||||
struct ufshcd_tx_eq_settings host[UFS_MAX_LANES];
|
||||
struct ufshcd_tx_eq_settings device[UFS_MAX_LANES];
|
||||
struct ufshcd_tx_eqtr_record *eqtr_record;
|
||||
bool is_valid;
|
||||
bool is_applied;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ufs_hba_variant_ops - variant specific callbacks
|
||||
* @name: variant name
|
||||
@@ -302,11 +382,10 @@ struct ufs_pwr_mode_info {
|
||||
* variant specific Uni-Pro initialization.
|
||||
* @link_startup_notify: called before and after Link startup is carried out
|
||||
* to allow variant specific Uni-Pro initialization.
|
||||
* @negotiate_pwr_mode: called to negotiate power mode.
|
||||
* @pwr_change_notify: called before and after a power mode change
|
||||
 * is carried out to allow vendor specific capabilities
|
||||
* to be set. PRE_CHANGE can modify final_params based
|
||||
* on desired_pwr_mode, but POST_CHANGE must not alter
|
||||
* the final_params parameter
|
||||
* to be set.
|
||||
* @setup_xfer_req: called before any transfer request is issued
|
||||
* to set some things
|
||||
* @setup_task_mgmt: called before any task management request is issued
|
||||
@@ -331,6 +410,11 @@ struct ufs_pwr_mode_info {
|
||||
* @config_esi: called to config Event Specific Interrupt
|
||||
* @config_scsi_dev: called to configure SCSI device parameters
|
||||
* @freq_to_gear_speed: called to map clock frequency to the max supported gear speed
|
||||
* @apply_tx_eqtr_settings: called to apply settings for TX Equalization
|
||||
* Training settings.
|
||||
* @get_rx_fom: called to get Figure of Merit (FOM) value.
|
||||
* @tx_eqtr_notify: called before and after TX Equalization Training procedure
|
||||
* to allow platform vendor specific configs to take place.
|
||||
*/
|
||||
struct ufs_hba_variant_ops {
|
||||
const char *name;
|
||||
@@ -347,10 +431,12 @@ struct ufs_hba_variant_ops {
|
||||
enum ufs_notify_change_status);
|
||||
int (*link_startup_notify)(struct ufs_hba *,
|
||||
enum ufs_notify_change_status);
|
||||
int (*pwr_change_notify)(struct ufs_hba *,
|
||||
enum ufs_notify_change_status status,
|
||||
const struct ufs_pa_layer_attr *desired_pwr_mode,
|
||||
struct ufs_pa_layer_attr *final_params);
|
||||
int (*negotiate_pwr_mode)(struct ufs_hba *hba,
|
||||
const struct ufs_pa_layer_attr *desired_pwr_mode,
|
||||
struct ufs_pa_layer_attr *final_params);
|
||||
int (*pwr_change_notify)(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
struct ufs_pa_layer_attr *final_params);
|
||||
void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
|
||||
bool is_scsi_cmd);
|
||||
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
|
||||
@@ -380,6 +466,17 @@ struct ufs_hba_variant_ops {
|
||||
int (*config_esi)(struct ufs_hba *hba);
|
||||
void (*config_scsi_dev)(struct scsi_device *sdev);
|
||||
u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq);
|
||||
int (*get_rx_fom)(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter);
|
||||
int (*apply_tx_eqtr_settings)(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
struct tx_eqtr_iter *h_iter,
|
||||
struct tx_eqtr_iter *d_iter);
|
||||
int (*tx_eqtr_notify)(struct ufs_hba *hba,
|
||||
enum ufs_notify_change_status status,
|
||||
struct ufs_pa_layer_attr *pwr_mode);
|
||||
};
|
||||
|
||||
/* clock gating state */
|
||||
@@ -528,6 +625,17 @@ enum ufshcd_state {
|
||||
UFSHCD_STATE_ERROR,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum ufshcd_pmc_policy - Power Mode change policy
|
||||
* @UFSHCD_PMC_POLICY_DONT_FORCE: Do not force a Power Mode change.
|
||||
* @UFSHCD_PMC_POLICY_FORCE: Force a Power Mode change even if current Power
|
||||
* Mode is same as target Power Mode.
|
||||
*/
|
||||
enum ufshcd_pmc_policy {
|
||||
UFSHCD_PMC_POLICY_DONT_FORCE,
|
||||
UFSHCD_PMC_POLICY_FORCE,
|
||||
};
|
||||
|
||||
enum ufshcd_quirks {
|
||||
/* Interrupt aggregation support is broken */
|
||||
UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
|
||||
@@ -690,6 +798,12 @@ enum ufshcd_quirks {
|
||||
* because it causes link startup to become unreliable.
|
||||
*/
|
||||
UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE = 1 << 26,
|
||||
|
||||
/*
|
||||
* On some platforms, the VCC regulator has a slow ramp-up time. Add a
|
||||
* delay after enabling VCC to ensure it's stable.
|
||||
*/
|
||||
UFSHCD_QUIRK_VCC_ON_DELAY = 1 << 27,
|
||||
};
|
||||
|
||||
enum ufshcd_caps {
|
||||
@@ -767,6 +881,13 @@ enum ufshcd_caps {
|
||||
* WriteBooster when scaling the clock down.
|
||||
*/
|
||||
UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12,
|
||||
|
||||
/*
|
||||
* This capability allows the host controller driver to apply TX
|
||||
* Equalization settings discovered from UFS attributes, variant
|
||||
 * specific operations and TX Equalization Training procedure.
|
||||
*/
|
||||
UFSHCD_CAP_TX_EQUALIZATION = 1 << 13,
|
||||
};
|
||||
|
||||
struct ufs_hba_variant_params {
|
||||
@@ -881,7 +1002,6 @@ enum ufshcd_mcq_opr {
|
||||
* @saved_uic_err: sticky UIC error mask
|
||||
* @ufs_stats: various error counters
|
||||
* @force_reset: flag to force eh_work perform a full reset
|
||||
* @force_pmc: flag to force a power mode change
|
||||
* @silence_err_logs: flag to silence error logs
|
||||
* @dev_cmd: ufs device management command information
|
||||
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
|
||||
@@ -943,7 +1063,21 @@ enum ufshcd_mcq_opr {
|
||||
* @critical_health_count: count of critical health exceptions
|
||||
* @dev_lvl_exception_count: count of device level exceptions since last reset
|
||||
* @dev_lvl_exception_id: vendor specific information about the device level exception event.
|
||||
* @dme_qos_notification: Bitfield of pending DME Quality of Service (QoS)
|
||||
* events. Bits[3:1] reflect the corresponding bits of UIC DME Error Code
|
||||
* field within the Host Controller's UECDME register. Bit[0] is a flag
|
||||
* indicating that the DME QoS Monitor has been reset by the host.
|
||||
* @dme_qos_sysfs_handle: handle for 'dme_qos_notification' sysfs entry
|
||||
* @rpmbs: list of OP-TEE RPMB devices (one per RPMB region)
|
||||
* @host_preshoot_cap: a bitfield to indicate supported PreShoot dBs of host's TX lanes, cache of
|
||||
* host M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15)
|
||||
* @host_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of host's TX lanes, cache
|
||||
* of host M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12)
|
||||
* @device_preshoot_cap: a bitfield to indicate supported PreShoot dBs of device's TX lanes, cache
|
||||
* of device M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15)
|
||||
* @device_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of device's TX lanes,
|
||||
* cache of device M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12)
|
||||
* @tx_eq_params: TX Equalization settings
|
||||
*/
|
||||
struct ufs_hba {
|
||||
void __iomem *mmio_base;
|
||||
@@ -1035,7 +1169,6 @@ struct ufs_hba {
|
||||
u32 saved_uic_err;
|
||||
struct ufs_stats ufs_stats;
|
||||
bool force_reset;
|
||||
bool force_pmc;
|
||||
bool silence_err_logs;
|
||||
|
||||
/* Device management request data */
|
||||
@@ -1116,8 +1249,18 @@ struct ufs_hba {
|
||||
int critical_health_count;
|
||||
atomic_t dev_lvl_exception_count;
|
||||
u64 dev_lvl_exception_id;
|
||||
|
||||
atomic_t dme_qos_notification;
|
||||
struct kernfs_node *dme_qos_sysfs_handle;
|
||||
|
||||
u32 vcc_off_delay_us;
|
||||
struct list_head rpmbs;
|
||||
|
||||
u8 host_preshoot_cap;
|
||||
u8 host_deemphasis_cap;
|
||||
u8 device_preshoot_cap;
|
||||
u8 device_deemphasis_cap;
|
||||
struct ufshcd_tx_eq_params tx_eq_params[UFS_HS_GEAR_MAX];
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1262,6 +1405,13 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
|
||||
return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
|
||||
}
|
||||
|
||||
/*
 * ufshcd_is_tx_eq_supported - whether TX Equalization can be used
 * @hba: per-adapter instance
 *
 * TX Equalization requires the capability flag, an UFSHCI 5.0+ host
 * controller, and a device reporting UFS spec version 5.0 or later.
 */
static inline bool ufshcd_is_tx_eq_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_TX_EQUALIZATION &&
	       hba->ufs_version >= ufshci_version(5, 0) &&
	       hba->dev_info.wspecversion >= 0x500;
}
|
||||
|
||||
#define ufsmcq_writel(hba, val, reg) \
|
||||
writel((val), (hba)->mcq_base + (reg))
|
||||
#define ufsmcq_readl(hba, reg) \
|
||||
@@ -1277,6 +1427,18 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
|
||||
#define ufshcd_readl(hba, reg) \
|
||||
readl((hba)->mmio_base + (reg))
|
||||
|
||||
static inline const char *ufs_hs_rate_to_str(enum ufs_hs_gear_rate rate)
|
||||
{
|
||||
switch (rate) {
|
||||
case PA_HS_MODE_A:
|
||||
return "A";
|
||||
case PA_HS_MODE_B:
|
||||
return "B";
|
||||
default:
|
||||
return "Unknown";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_rmwl - perform read/modify/write for a controller register
|
||||
* @hba: per adapter instance
|
||||
@@ -1361,9 +1523,16 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
|
||||
u8 attr_set, u32 mib_val, u8 peer);
|
||||
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
|
||||
u32 *mib_val, u8 peer);
|
||||
extern int ufshcd_change_power_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *pwr_mode,
|
||||
enum ufshcd_pmc_policy pmc_policy);
|
||||
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
|
||||
struct ufs_pa_layer_attr *desired_pwr_mode);
|
||||
struct ufs_pa_layer_attr *desired_pwr_mode,
|
||||
enum ufshcd_pmc_policy pmc_policy);
|
||||
extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);
|
||||
extern int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba,
|
||||
struct ufshcd_tx_eq_params *params,
|
||||
u32 gear);
|
||||
|
||||
/* UIC command interfaces for DME primitives */
|
||||
#define DME_LOCAL 0
|
||||
|
||||
@@ -115,6 +115,7 @@ enum {
|
||||
enum {
|
||||
REG_CQIS = 0x0,
|
||||
REG_CQIE = 0x4,
|
||||
REG_MCQIACR = 0x8,
|
||||
};
|
||||
|
||||
enum {
|
||||
@@ -188,6 +189,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
|
||||
#define SYSTEM_BUS_FATAL_ERROR 0x20000
|
||||
#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
|
||||
#define MCQ_CQ_EVENT_STATUS 0x100000
|
||||
#define MCQ_IAG_EVENT_STATUS 0x200000
|
||||
|
||||
#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
|
||||
UIC_HIBERNATE_EXIT)
|
||||
@@ -271,6 +273,7 @@ enum {
|
||||
/* UECDME - Host UIC Error Code DME 48h */
|
||||
#define UIC_DME_ERROR 0x80000000
|
||||
#define UIC_DME_ERROR_CODE_MASK 0x1
|
||||
#define UIC_DME_QOS_MASK 0xE
|
||||
|
||||
/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
|
||||
#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user