[PATCH 10/23] net: mana: Define max values for SGL entries
authorLong Li <longli@microsoft.com>
Thu, 3 Nov 2022 19:16:26 +0000 (12:16 -0700)
committerSalvatore Bonaccorso <carnil@debian.org>
Fri, 29 Sep 2023 04:15:31 +0000 (05:15 +0100)
The maximum number of SGL entries should be computed from the maximum
WQE size for the intended queue type and the corresponding OOB data
size. This guarantees the hardware queue can successfully queue requests
up to the queue depth exposed to the upper layer.

Reviewed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-9-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
(cherry picked from commit aa56549792fb348892fbbae67f6f0c71bb750b65)
Signed-off-by: Bastian Blank <waldi@debian.org>
Gbp-Pq: Topic features/all/ethernet-microsoft
Gbp-Pq: Name 0010-net-mana-Define-max-values-for-SGL-entries.patch

drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/gdma.h
include/net/mana/mana.h

index c3d75eab73659f3068f61861b9d51f6814b7c918..288030a0d46debd7c3eb03b098e4094b2dd00a30 100644 (file)
@@ -190,7 +190,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        pkg.wqe_req.client_data_unit = 0;
 
        pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
-       WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+       WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 
        if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
                pkg.wqe_req.sgl = pkg.sgl_array;
index 86d8a9e36005bf268c115ac153602d1378559dca..11fc1cc67c0104402d3315b93358667788e16ad4 100644 (file)
@@ -431,6 +431,13 @@ struct gdma_wqe {
 #define MAX_TX_WQE_SIZE 512
 #define MAX_RX_WQE_SIZE 256
 
+#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE -                      \
+                       sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
+                       sizeof(struct gdma_sge))
+
+#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE -                      \
+                       sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
+
 struct gdma_cqe {
        u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
 
index 6e9e86fb4c026cd4b18dcd1fd996b38ccdf50e3b..713a8f8cca9a7be0567ab052da7d179900c0a480 100644 (file)
@@ -265,8 +265,6 @@ struct mana_cq {
        int budget;
 };
 
-#define GDMA_MAX_RQE_SGES 15
-
 struct mana_recv_buf_oob {
        /* A valid GDMA work request representing the data buffer. */
        struct gdma_wqe_request wqe_req;
@@ -276,7 +274,7 @@ struct mana_recv_buf_oob {
 
        /* SGL of the buffer going to be sent has part of the work request. */
        u32 num_sge;
-       struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
+       struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
 
        /* Required to store the result of mana_gd_post_work_request.
         * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the