Commit b228e58a
Author: Denis Bolotin
Committer: Greg Kroah-Hartman

qed: Fix SPQ entries not returned to pool in error flows

[ Upstream commit fb5e7438e7a3c8966e04ccb0760170e9e06f3699 ]

qed_sp_destroy_request() API was added for SPQ users that need to
free/return the entry they acquired in their error flows.
Signed-off-by: Denis Bolotin <denis.bolotin@cavium.com>
Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Parent: b2e5004e
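To illustrate the pattern this fix establishes, here is a minimal, hypothetical sketch of an SPQ user's error flow. The FOO_* identifiers and foo_prepare_ramrod() are placeholders, not real qed symbols; only qed_sp_init_request(), qed_spq_post() and qed_sp_destroy_request() come from the driver and this patch. The rule shown: an entry acquired through qed_sp_init_request() must be handed to qed_sp_destroy_request() on any error path taken before qed_spq_post().

/* Hypothetical example only; FOO_* and foo_prepare_ramrod() are placeholders. */
static int qed_sp_foo_start(struct qed_hwfn *p_hwfn)
{
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	/* Acquires an SPQ entry (from the pool, or allocated if the pool is empty) */
	rc = qed_sp_init_request(p_hwfn, &p_ent, FOO_RAMROD_CMD_ID_START,
				 PROTOCOLID_FOO, &init_data);
	if (rc)
		return rc;	/* qed_sp_init_request() cleans up after itself on failure */

	rc = foo_prepare_ramrod(p_hwfn, p_ent);
	if (rc)
		goto err;	/* entry acquired but not yet posted */

	return qed_spq_post(p_hwfn, p_ent, NULL);

err:
	/* Return the entry to the free pool, or free it if it was allocated */
	qed_sp_destroy_request(p_hwfn, p_ent);
	return rc;
}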
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 			  "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
 			  fcoe_pf_params->num_cqs,
 			  p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto err;
 	}
 
 	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
 	if (rc)
-		return rc;
+		goto err;
 
 	cxt_info.iid = dummy_cid;
 	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
 			  dummy_cid);
-		return rc;
+		goto err;
 	}
 
 	p_cxt = cxt_info.p_cxt;
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
 	return rc;
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+	return rc;
 }
 
 static int
......
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 		       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
 		       p_params->num_queues,
 		       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}
......
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
 	if (rc) {
-		/* Return spq entry which is taken in qed_sp_init_request()*/
-		qed_spq_return_entry(p_hwfn, p_ent);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "%d is not supported yet\n",
 			  p_filter_cmd->opcode);
+		qed_sp_destroy_request(p_hwfn, *pp_ent);
 		return -EINVAL;
 	}
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 	} else {
 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 		if (rc)
-			return rc;
+			goto err;
 
 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
 					     &abs_rx_q_id);
 			if (rc)
-				return rc;
+				goto err;
 
 			p_ramrod->rx_qid_valid = 1;
 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 		   (u64)p_params->addr, p_params->length);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+	return rc;
 }
 
 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
......
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
 	default:
 		rc = -EINVAL;
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 	SET_FIELD(p_ramrod->flags1,
......
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
 			  rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
......
@@ -399,6 +399,17 @@ struct qed_sp_init_data {
 	struct qed_spq_comp_cb *p_comp_data;
 };
 
+/**
+ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+ *        Should be called on in error flows after initializing the SPQ entry
+ *        and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent);
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			u8 cmd,
......
@@ -47,6 +47,19 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"
 
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent)
+{
+	/* qed_spq_get_entry() can either get an entry from the free_pool,
+	 * or, if no entries are left, allocate a new entry and add it to
+	 * the unlimited_pending list.
+	 */
+	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+		kfree(p_ent);
+	else
+		qed_spq_return_entry(p_hwfn, p_ent);
+}
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -111,14 +124,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	return 0;
 
 err:
-	/* qed_spq_get_entry() can either get an entry from the free_pool,
-	 * or, if no entries are left, allocate a new entry and add it to
-	 * the unlimited_pending list.
-	 */
-	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
-		kfree(p_ent);
-	else
-		qed_spq_return_entry(p_hwfn, p_ent);
+	qed_sp_destroy_request(p_hwfn, p_ent);
 
 	return -EINVAL;
 }
......
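The comment moved in the hunks above explains why qed_sp_destroy_request() must distinguish pooled entries from allocated ones. As a rough, simplified model of the acquisition side (this is not the kernel code itself; the `pending` list and the entry `list` member are assumptions for illustration, and the real function also takes the SPQ lock), qed_spq_get_entry() behaves approximately like this:

/* Simplified model only; details such as locking and logging are omitted. */
static int spq_get_entry_model(struct qed_spq *p_spq,
			       struct qed_spq_entry **pp_ent)
{
	struct qed_spq_entry *p_ent;

	if (list_empty(&p_spq->free_pool)) {
		/* Pool exhausted: allocate a new entry and tag it with the
		 * unlimited_pending list so qed_sp_destroy_request() can
		 * later identify it as heap-allocated and kfree() it.
		 */
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent)
			return -ENOMEM;
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		/* Normal case: hand out a pool-owned entry, which must be
		 * given back with qed_spq_return_entry() if never posted.
		 */
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;
	return 0;
}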
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
 	default:
 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
 			  p_hwfn->hw_info.personality);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}
......