[PATCH 10/44] net: mana: Define max values for SGL entries
author Long Li <longli@microsoft.com>
Thu, 3 Nov 2022 19:16:26 +0000 (12:16 -0700)
committer Salvatore Bonaccorso <carnil@debian.org>
Thu, 10 Apr 2025 19:32:42 +0000 (21:32 +0200)
The maximum number of SGL entries should be computed from the maximum
WQE size for the intended queue type and the corresponding OOB data
size. This guarantees the hardware queue can successfully queue requests
up to the queue depth exposed to the upper layer.
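
For reference, the new limits work out to the previously hardcoded
values. Below is a minimal standalone sketch of the arithmetic, assuming
the sizes defined elsewhere in gdma.h (GDMA_MAX_SQE_SIZE = 512,
GDMA_MAX_RQE_SIZE = 256, a 16-byte struct gdma_sge and
INLINE_OOB_SMALL_SIZE = 8); the constants are hardcoded here for
illustration only:

  /* Sketch of the SGL-entry math; mirrors the MAX_*_WQE_SGL_ENTRIES
   * macros added below, with gdma.h values hardcoded for illustration.
   */
  #include <stdio.h>

  #define SQE_SIZE        512   /* GDMA_MAX_SQE_SIZE */
  #define RQE_SIZE        256   /* GDMA_MAX_RQE_SIZE */
  #define SGE_SIZE         16   /* sizeof(struct gdma_sge) */
  #define OOB_SMALL_SIZE    8   /* INLINE_OOB_SMALL_SIZE */

  int main(void)
  {
          /* Remaining WQE space divided by the SGE size. */
          printf("TX: %d\n", (SQE_SIZE - SGE_SIZE - OOB_SMALL_SIZE) / SGE_SIZE);
          printf("RX: %d\n", (RQE_SIZE - SGE_SIZE) / SGE_SIZE);
          return 0;
  }

With integer division this yields 30 TX and 15 RX entries, matching the
WARN_ON_ONCE(... > 30) check and the GDMA_MAX_RQE_SGES 15 constant that
this patch replaces.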

Reviewed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-9-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
(cherry picked from commit aa56549792fb348892fbbae67f6f0c71bb750b65)
Signed-off-by: Bastian Blank <waldi@debian.org>
Gbp-Pq: Topic features/all/ethernet-microsoft
Gbp-Pq: Name 0010-net-mana-Define-max-values-for-SGL-entries.patch

drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/gdma.h
include/net/mana/mana.h

index 220d44e55da929bebda1de5ec035e5e9658dcfab..65637aa333a45697b08c2ec43d1b077470d72c0c 100644
@@ -190,7 +190,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        pkg.wqe_req.client_data_unit = 0;
 
        pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
-       WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+       WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 
        if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
                pkg.wqe_req.sgl = pkg.sgl_array;
index 86d8a9e36005bf268c115ac153602d1378559dca..11fc1cc67c0104402d3315b93358667788e16ad4 100644
@@ -431,6 +431,13 @@ struct gdma_wqe {
 #define MAX_TX_WQE_SIZE 512
 #define MAX_RX_WQE_SIZE 256
 
+#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE -                      \
+                       sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
+                       sizeof(struct gdma_sge))
+
+#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE -                      \
+                       sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
+
 struct gdma_cqe {
        u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
 
index 1f6f502dee3b235e0bdf5c7cf0a52a99f6e434fe..a3baf498df67ead1b8b90026f3087cf7c45a9a18 100644
@@ -268,8 +268,6 @@ struct mana_cq {
        int budget;
 };
 
-#define GDMA_MAX_RQE_SGES 15
-
 struct mana_recv_buf_oob {
        /* A valid GDMA work request representing the data buffer. */
        struct gdma_wqe_request wqe_req;
@@ -279,7 +277,7 @@ struct mana_recv_buf_oob {
 
        /* SGL of the buffer going to be sent has part of the work request. */
        u32 num_sge;
-       struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
+       struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
 
        /* Required to store the result of mana_gd_post_work_request.
         * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the