2026-01-29 22:25:33 +08:00

670 lines
17 KiB
C

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2022 - 2024 Mucse Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
#include "rnpm.h"
#include "rnpm_type.h"
#include "rnpm_common.h"
#include "rnpm_mbx.h"
#include "rnpm_mbx_fw.h"
// #define MBX_RD_DEBUG
/* == VEC == */
#define VF2PF_MBOX_VEC(VF) (0xa5100 + 4 * (VF))
#define CPU2PF_MBOX_VEC (0xa5300)
/* == PF <--> VF mailbox ==== */
/* 64bytes */
#define SHARE_MEM_BYTES 64
/* for PF1 rtl will remap 6000 to 0xb000 */
#define PF_VF_SHM(vf) ((0xa6000) + (64 * (vf)))
#define PF2VF_COUNTER(vf) (PF_VF_SHM(vf) + 0)
#define VF2PF_COUNTER(vf) (PF_VF_SHM(vf) + 4)
#define PF_VF_SHM_DATA(vf) (PF_VF_SHM(vf) + 8)
#define PF2VF_MBOX_CTRL(vf) ((0xa7100) + (4 * (vf)))
#define PF_VF_MBOX_MASK_LO ((0xa7200))
#define PF_VF_MBOX_MASK_HI ((0xa7300))
//=== CPU <--> PF ===
#define CPU_PF_SHM (0xaa000)
#define CPU2PF_COUNTER (CPU_PF_SHM + 0)
#define PF2CPU_COUNTER (CPU_PF_SHM + 4)
#define CPU_PF_SHM_DATA (CPU_PF_SHM + 8)
#define PF2CPU_MBOX_CTRL (0xaa100)
#define CPU_PF_MBOX_MASK (0xaa300)
#define MBOX_CTRL_REQ (1 << 0) // WO
//#define MBOX_CTRL_VF_HOLD_SHM (1<<2) // VF:WR, PF:RO
#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) // VF:RO, PF:WR
//#define MBOX_CTRL_PF_CPU_HOLD_SHM (1<<3) // for pf <--> cpu
#define MBOX_IRQ_EN 0
#define MBOX_IRQ_DISABLE 1
#define mbx_prd32(hw, reg) prnp_rd_reg((hw)->hw_addr + (reg))
#define mbx_rd32(hw, reg) rnpm_rd_reg((hw)->hw_addr + (reg))
#define mbx_pwr32(hw, reg, val) p_rnp_wr_reg((hw)->hw_addr + (reg), (val))
#define mbx_wr32(hw, reg, val) rnpm_wr_reg((hw)->hw_addr + (reg), (val))
/**
 * rnpm_read_mbx - Reads a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox/vfnum to read
 *
 * returns SUCCESS if it successfully read message from buffer
 **/
s32 rnpm_read_mbx(struct rnpm_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;

	/* never read more than the mailbox can hold */
	if (size > mbx->size)
		size = mbx->size;

	if (!mbx->ops.read) {
		TRACE();
		return RNPM_ERR_MBX;
	}

	return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
 * rnpm_write_mbx - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully copied message into the buffer
 **/
s32 rnpm_write_mbx(struct rnpm_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;

	/* refuse a message that would not fit in the shared buffer */
	if (size > mbx->size)
		return RNPM_ERR_MBX;

	/* silently succeed when no write op is installed (original behavior) */
	return mbx->ops.write ? mbx->ops.write(hw, msg, size, mbx_id) : 0;
}
static inline u16 rnpm_mbx_get_req(struct rnpm_hw *hw, int reg)
{
/* memory barrior */
mb();
return mbx_rd32(hw, reg) & 0xffff;
}
static inline u16 rnpm_mbx_get_ack(struct rnpm_hw *hw, int reg)
{
/* memory barrior */
mb();
return (mbx_rd32(hw, reg) >> 16) & 0xffff;
}
/* Bump the PF request counter (low 16 bits of the shared counter word). */
static inline void rnpm_mbx_inc_pf_req(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER :
					   PF2VF_COUNTER(mbx_id);
	u32 counter = mbx_rd32(hw, reg);
	u16 req = (counter & 0xffff) + 1; /* wraps at 16 bits by design */

	counter = (counter & 0xffff0000) | req;
	/* memory barrier before publishing the new count */
	mb();
	mbx_wr32(hw, reg, counter);
	/* update stats */
	hw->mbx.stats.msgs_tx++;
}
static inline void rnpm_mbx_inc_pf_ack(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
u16 ack;
int reg =
(mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
u32 v = mbx_rd32(hw, reg);
ack = (v >> 16) & 0xffff;
ack++;
v &= ~(0xffff0000);
v |= (ack << 16);
/* memory barrior */
mb();
mbx_wr32(hw, reg, v);
/* update stats */
hw->mbx.stats.msgs_rx++;
}
/**
 * rnpm_check_for_msg - checks to see if someone sent us mail
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 rnpm_check_for_msg(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;

	if (!mbx->ops.check_for_msg)
		return RNPM_ERR_MBX;

	return mbx->ops.check_for_msg(hw, mbx_id);
}
/**
 * rnpm_check_for_ack - checks to see if someone sent us ACK
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns SUCCESS if the Status bit was found or else ERR_MBX
 **/
s32 rnpm_check_for_ack(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;

	if (!mbx->ops.check_for_ack)
		return RNPM_ERR_MBX;

	return mbx->ops.check_for_ack(hw, mbx_id);
}
/**
 * rnpm_poll_for_msg - Wait for message notification
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message notification
 **/
static s32 rnpm_poll_for_msg(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;
	int budget = mbx->timeout;

	/* no timeout or no check op: fall straight through to the final
	 * budget test (a nonzero budget with a missing op reports success,
	 * matching the original behavior)
	 */
	if (!budget || !mbx->ops.check_for_msg)
		goto done;

	/* spin until the peer posts a message or the budget runs out */
	while (budget && mbx->ops.check_for_msg(hw, mbx_id)) {
		if (!--budget)
			break;
		udelay(mbx->usec_delay);
	}
done:
	return budget ? 0 : -ETIME;
}
/**
 * rnpm_poll_for_ack - Wait for message acknowledgment
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message acknowledgment
 **/
static s32 rnpm_poll_for_ack(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;
	int budget = mbx->timeout;

	/* no timeout or no check op: fall straight through to the final
	 * budget test (a nonzero budget with a missing op reports success,
	 * matching the original behavior)
	 */
	if (!budget || !mbx->ops.check_for_ack)
		goto done;

	/* spin until the peer acks or the budget runs out */
	while (budget && mbx->ops.check_for_ack(hw, mbx_id)) {
		if (!--budget)
			break;
		udelay(mbx->usec_delay);
	}
done:
	return budget ? 0 : -ETIME;
}
/**
 * rnpm_read_posted_mbx - Wait for message notification and receive message
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully received a message notification and
 * copied it into the receive buffer.
 **/
static s32 rnpm_read_posted_mbx(struct rnpm_hw *hw, u32 *msg, u16 size,
				enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;
	s32 err;

	if (!mbx->ops.read)
		return RNPM_ERR_MBX;

	err = rnpm_poll_for_msg(hw, mbx_id);
	if (err)
		return err;

	/* a notification arrived; pull the message out of the shm buffer */
	return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
 * rnpm_write_posted_mbx - Write a message to the mailbox, wait for ack
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to write
 *
 * returns SUCCESS if it successfully copied message into the buffer and
 * received an ack to that message within delay * timeout period
 **/
static s32 rnpm_write_posted_mbx(struct rnpm_hw *hw, u32 *msg, u16 size,
				 enum MBX_ID mbx_id)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;
	s32 err;

	/* exit if either we can't write or there isn't a defined timeout */
	if (!mbx->ops.write || !mbx->timeout)
		return RNPM_ERR_MBX;

	/* send msg and hold buffer lock */
	err = mbx->ops.write(hw, msg, size, mbx_id);
	if (err)
		return err;

	/* message sent; now wait for the peer's ack */
	return rnpm_poll_for_ack(hw, mbx_id);
}
/**
* rnpm_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 rnpm_check_for_msg_pf(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
s32 ret_val = RNPM_ERR_MBX;
if (mbx_id == MBX_CM3CPU) {
if (rnpm_mbx_get_req(hw, CPU2PF_COUNTER) != hw->mbx.cpu_req) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
} else {
if (rnpm_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)) !=
hw->mbx.vf_req[mbx_id]) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
}
return ret_val;
}
/**
* rnpm_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 rnpm_check_for_ack_pf(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
s32 ret_val = RNPM_ERR_MBX;
if (mbx_id == MBX_CM3CPU) {
if (rnpm_mbx_get_ack(hw, CPU2PF_COUNTER) != hw->mbx.cpu_ack) {
ret_val = 0;
hw->mbx.stats.acks++;
}
} else {
if (rnpm_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)) !=
hw->mbx.vf_ack[mbx_id]) {
ret_val = 0;
hw->mbx.stats.acks++;
}
}
return ret_val;
}
/**
 * rnpm_obtain_mbx_lock_pf - obtain mailbox lock
 * @hw: pointer to the HW structure
 * @mbx_id: the VF index or CPU
 *
 * return SUCCESS if we obtained the mailbox lock
 **/
static s32 rnpm_obtain_mbx_lock_pf(struct rnpm_hw *hw, enum MBX_ID mbx_id)
{
	u32 ctrl_reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL :
						PF2VF_MBOX_CTRL(mbx_id);
	int attempts = 5000;

	do {
		/* request ownership of the shared buffer */
		mbx_wr32(hw, ctrl_reg, MBOX_CTRL_PF_HOLD_SHM);
		/* memory barrier */
		mb();
		/* the hold bit reads back set only if we really own it */
		if (mbx_rd32(hw, ctrl_reg) & MBOX_CTRL_PF_HOLD_SHM)
			return 0;
		udelay(100);
	} while (--attempts > 0);

	rnpm_err("%s: failed to get:%d lock\n", __func__, mbx_id);
	return -EPERM;
}
/**
 * rnpm_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer, in u32 words (at most RNPM_VFMAILBOX_SIZE)
 * @mbx_id: the VF index or MBX_CM3CPU
 *
 * returns SUCCESS if it successfully copied message into the buffer
 **/
static s32 rnpm_write_mbx_pf(struct rnpm_hw *hw, u32 *msg, u16 size,
			     enum MBX_ID mbx_id)
{
	s32 ret_val = 0;
	u16 i;
	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA :
						PF_VF_SHM_DATA(mbx_id);
	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL :
						PF2VF_MBOX_CTRL(mbx_id);
	u32 wait_msg_free_cnt = 4;

	if (size > RNPM_VFMAILBOX_SIZE) {
		rnpm_info("%s: size:%d should <%d\n", __func__, size,
			  RNPM_VFMAILBOX_SIZE);
		return -EINVAL;
	}
	if (rnpm_logd_level(LOG_MBX_OUT)) {
		rnpm_logd(LOG_MBX_OUT, "%x mbx_out:", hw->pfvfnum);
		/* dump up to the first 4 words, bounded by @size so we never
		 * read past the caller's buffer for short messages
		 */
		for (i = 0; i < 4 && i < size; i++)
			rnpm_logd(LOG_MBX_OUT, " 0x%08x ", msg[i]);
		rnpm_logd(LOG_MBX_OUT, "\n");
	}
retry:
	/* give the peer a few ms to consume a still-pending message */
	wait_msg_free_cnt--;
	if (wait_msg_free_cnt > 0 && mbx_rd32(hw, DATA_REG) != 0) {
		udelay(1000);
		goto retry;
	}
	/* lock the mailbox to prevent pf/vf/cpu race condition */
	ret_val = rnpm_obtain_mbx_lock_pf(hw, mbx_id);
	if (ret_val) {
		/* guard the logged words: msg may hold fewer than 2 entries */
		rnpm_err(
			"%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n",
			__func__, mbx_id, ret_val, size > 0 ? msg[0] : 0,
			size > 1 ? msg[1] : 0);
		goto out_no_write;
	}
	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++) {
#ifdef MBX_WR_DEBUG
		mbx_pwr32(hw, DATA_REG + i * 4, msg[i]);
#else
		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
#endif
	}
	/* flush msg and acks as we are overwriting the message buffer */
	if (mbx_id == MBX_CM3CPU) {
		hw->mbx.cpu_ack = rnpm_mbx_get_ack(hw, CPU2PF_COUNTER);
	} else {
		hw->mbx.vf_ack[mbx_id] =
			rnpm_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id));
	}
	rnpm_mbx_inc_pf_req(hw, mbx_id);
	/* Interrupt VF/CM3 to tell it a message has
	 * been sent and release buffer
	 */
	udelay(30);
	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
out_no_write:
	return ret_val;
}
/**
 * rnpm_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer, in u32 words (at most RNPM_VFMAILBOX_SIZE)
 * @mbx_id: the VF index or MBX_CM3CPU
 *
 * This function copies a message from the mailbox buffer to the caller's
 * memory buffer. The presumption is that the caller knows that there was
 * a message due to a VF/CPU request so no polling for message is needed.
 **/
static s32 rnpm_read_mbx_pf(struct rnpm_hw *hw, u32 *msg, u16 size,
			    enum MBX_ID mbx_id)
{
	s32 ret_val = -EIO;
	u32 i;
	u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA :
					       PF_VF_SHM_DATA(mbx_id);
	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL :
						PF2VF_MBOX_CTRL(mbx_id);
	if (size > RNPM_VFMAILBOX_SIZE) {
		rnpm_info("%s: size:%d should <%d\n", __func__, size,
			  RNPM_VFMAILBOX_SIZE);
		return -EINVAL;
	}
	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = rnpm_obtain_mbx_lock_pf(hw, mbx_id);
	if (ret_val) {
		ret_val = -EPERM;
		goto out_no_read;
	}
	/* memory barrier before draining the shared buffer */
	mb();
	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++) {
#ifdef MBX_RD_DEBUG
		msg[i] = mbx_prd32(hw, BUF_REG + 4 * i);
#else
		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
#endif
	}
	/* zero the opcode word so a stale message is never seen again */
	mbx_wr32(hw, BUF_REG, 0);
	/* update req. used by rnpmvf_check_for_msg_vf */
	if (mbx_id == MBX_CM3CPU) {
		hw->mbx.cpu_req = rnpm_mbx_get_req(hw, CPU2PF_COUNTER);
	} else {
		hw->mbx.vf_req[mbx_id] =
			rnpm_mbx_get_req(hw, VF2PF_COUNTER(mbx_id));
	}
	/* Acknowledge receipt and release mailbox, then we're done */
	rnpm_mbx_inc_pf_ack(hw, mbx_id);
	/* free ownership of the buffer */
	mbx_wr32(hw, CTRL_REG, 0);
	if (rnpm_logd_level(LOG_MBX_IN)) {
		rnpm_logd(LOG_MBX_IN, "%x mbx_in :", hw->pfvfnum);
		/* bound the dump by @size: the old fixed bound of 16 read
		 * past the caller's buffer for shorter messages
		 */
		for (i = 0; i < size; i++)
			rnpm_logd(LOG_MBX_IN, "0x%08x ", msg[i]);
		rnpm_logd(LOG_MBX_IN, "\n");
	}
out_no_read:
	return ret_val;
}
/* Resync the cached req/ack counters with hardware, drop any held buffer
 * locks, and unmask all mailbox interrupt sources.
 */
static void rnpm_mbx_reset(struct rnpm_hw *hw)
{
	int vf, counter;

	/* NOTE(review): RNP_MAX_VF_FUNCTIONS here vs RNPM_MAX_VF_FUNCTIONS
	 * elsewhere in this file — confirm both name the same limit
	 */
	for (vf = 0; vf < RNP_MAX_VF_FUNCTIONS; vf++) {
		counter = mbx_rd32(hw, VF2PF_COUNTER(vf));
		hw->mbx.vf_req[vf] = counter & 0xffff;
		hw->mbx.vf_ack[vf] = (counter >> 16) & 0xffff;
		/* release pf<->vf shared-buffer lock */
		mbx_wr32(hw, PF2VF_MBOX_CTRL(vf), 0);
	}
	/* resync pf<->cm3 counters */
	counter = mbx_rd32(hw, CPU2PF_COUNTER);
	hw->mbx.cpu_req = counter & 0xffff;
	hw->mbx.cpu_ack = (counter >> 16) & 0xffff;
	/* release pf->cm3 buffer lock */
	mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
	/* unmask vf->pf interrupts */
	wr32(hw, PF_VF_MBOX_MASK_LO, 0);
	wr32(hw, PF_VF_MBOX_MASK_HI, 0);
	/* allow CM3CPU to PF MBX IRQ */
	wr32(hw, CPU_PF_MBOX_MASK, 0);
}
/* Enable path: resync counters, release locks, route every mailbox source
 * to @nr_vec, and unmask the interrupts. Disable path: mask everything and
 * release the locks. Always returns 0.
 */
static int rnpm_mbx_configure_pf(struct rnpm_hw *hw, int nr_vec, bool enable)
{
	u32 counter;
	int vf;

	if (!enable) {
		/* mask every vf->pf and cm3->pf mailbox interrupt */
		wr32(hw, PF_VF_MBOX_MASK_LO, 0xffffffff);
		wr32(hw, PF_VF_MBOX_MASK_HI, 0xffffffff);
		wr32(hw, CPU_PF_MBOX_MASK, 0xffffffff);
		/* drop any held vf->pf buffer locks */
		for (vf = 0; vf < RNPM_MAX_VF_FUNCTIONS; vf++)
			mbx_wr32(hw, PF2VF_MBOX_CTRL(vf), 0);
		/* drop the pf->cm3 buffer lock */
		mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
		wr32(hw, RNPM_DMA_DUMY, 0);
		return 0;
	}

	/* resync cached vf counters and release the buffer locks */
	for (vf = 0; vf < RNPM_MAX_VF_FUNCTIONS; vf++) {
		counter = mbx_rd32(hw, VF2PF_COUNTER(vf));
		hw->mbx.vf_req[vf] = counter & 0xffff;
		hw->mbx.vf_ack[vf] = (counter >> 16) & 0xffff;
		mbx_wr32(hw, PF2VF_MBOX_CTRL(vf), 0);
	}
	/* resync pf<->cm3 state and release its buffer lock */
	counter = mbx_rd32(hw, CPU2PF_COUNTER);
	hw->mbx.cpu_req = counter & 0xffff;
	hw->mbx.cpu_ack = (counter >> 16) & 0xffff;
	mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
	/* steer every vf->pf request interrupt at the chosen vector */
	for (vf = 0; vf < RNPM_MAX_VF_FUNCTIONS; vf++)
		mbx_wr32(hw, VF2PF_MBOX_VEC(vf), nr_vec);
	wr32(hw, PF_VF_MBOX_MASK_LO, 0);
	wr32(hw, PF_VF_MBOX_MASK_HI, 0);
	/* cm3 shares the same vector (cm3 and VF63 share irq #63) */
	wr32(hw, CPU2PF_MBOX_VEC, nr_vec);
	wr32(hw, CPU_PF_MBOX_MASK, 0);
	return 0;
}
/**
 * rnpm_init_mbx_params_pf - set initial values for pf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for pf mailbox
 */
s32 rnpm_init_mbx_params_pf(struct rnpm_hw *hw)
{
	struct rnpm_mbx_info *mbx = &hw->mbx;
	int attempt;

	mbx->usec_delay = 100;
	/* poll budget: 4 seconds worth of 100us ticks */
	mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay;
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;
	mbx->size = RNPM_VFMAILBOX_SIZE;
	mbx->reply_dma_size = 4096;
	mbx->reply_dma = NULL;
	/* allocation is retried once; the mailbox itself still works even
	 * if the reply buffer cannot be obtained
	 */
	for (attempt = 0; attempt < 2 && !mbx->reply_dma; attempt++)
		mbx->reply_dma = dma_alloc_coherent(&hw->pdev->dev,
						    mbx->reply_dma_size,
						    &mbx->reply_dma_phy,
						    GFP_ATOMIC);
	if (!mbx->reply_dma) {
		rnpm_err("%s: dma_alloc_coherent failed! %p\n", __func__,
			 mbx->reply_dma);
		mbx->reply_dma_size = 0;
	}
	rnpm_mbx_reset(hw);
	return 0;
}
/* PF-side mailbox operations table: hooked into hw->mbx.ops so the generic
 * read/write/posted wrappers above dispatch to the PF register accessors.
 */
struct rnpm_mbx_operations mbx_ops_generic = {
	.init_params = rnpm_init_mbx_params_pf,
	.read = rnpm_read_mbx_pf,
	.write = rnpm_write_mbx_pf,
	.read_posted = rnpm_read_posted_mbx,
	.write_posted = rnpm_write_posted_mbx,
	.check_for_msg = rnpm_check_for_msg_pf,
	.check_for_ack = rnpm_check_for_ack_pf,
	.configure = rnpm_mbx_configure_pf,
};