net: thunderx: rework xcast message structure to make it fit into 64 bit [Linux 5.0]

This Linux kernel change "net: thunderx: rework xcast message structure to make it fit into 64 bit" is included in the Linux 5.0 release. This change is authored by Vadim Lomovtsev <vlomovtsev [at] marvell.com> on Wed Feb 20 11:02:44 2019 +0000. The commit for this change in Linux stable tree is 5354439 (patch).

net: thunderx: rework xcast message structure to make it fit into 64 bit

To communicate with the PF, each ThunderX NIC VF uses a mailbox, which is
a pair of 64-bit registers available to both the VF and the PF.

This commit changes the xcast message structure so that it fits into
64 bits.

Signed-off-by: Vadim Lomovtsev <[email protected]>
Signed-off-by: David S. Miller <[email protected]>

This change adds or deletes 16 lines of Linux kernel source code. The code changes are as follows.

 drivers/net/ethernet/cavium/thunder/nic.h        | 6 ++----
 drivers/net/ethernet/cavium/thunder/nic_main.c   | 4 ++--
 drivers/net/ethernet/cavium/thunder/nicvf_main.c | 6 +++---
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 376a96b..2273436 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -577,10 +577,8 @@ struct set_ptp {

 struct xcast {
    u8    msg;
-   union {
-       u8    mode;
-       u64   mac;
-   } data;
+   u8    mode;
+   u64   mac:48;
 };

 /* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 90497a2..620dbe0 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1094,7 +1094,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
        lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
        bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
-                   mbx.xcast.data.mac,
+                   mbx.xcast.mac,
                    vf < NIC_VF_PER_MBX_REG ? vf :
                    vf - NIC_VF_PER_MBX_REG);
        break;
@@ -1106,7 +1106,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        }
        bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
        lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-       bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+       bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
        break;
    default:
        dev_err(&nic->pdev->dev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45f0650..da5986c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1961,7 +1961,7 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
         * its' own LMAC to the filter to accept packets for it.
         */
        mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-       mbx.xcast.data.mac = 0;
+       mbx.xcast.mac = 0;
        if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
            goto free_mc;
    }
@@ -1971,7 +1971,7 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
        /* now go through kernel list of MACs and add them one by one */
        for (idx = 0; idx < mc_addrs->count; idx++) {
            mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-           mbx.xcast.data.mac = mc_addrs->mc[idx];
+           mbx.xcast.mac = mc_addrs->mc[idx];
            if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
                goto free_mc;
        }
@@ -1979,7 +1979,7 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,

    /* and finally set rx mode for PF accordingly */
    mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-   mbx.xcast.data.mode = mode;
+   mbx.xcast.mode = mode;

    nicvf_send_msg_to_pf(nic, &mbx);
 free_mc:

Leave a Reply

Your email address will not be published. Required fields are marked *