nvidia-gpu: add power sensor

Add support for reading the GPU's current power draw and exposing it
as a power sensor on D-Bus. The value is fetched with the Get Current
Power Draw command (PLATFORM_ENVIRONMENTAL command 0x03) over the OCP
accelerator-management MCTP VDM; the device reports milliwatts, which
are converted to watts before being published.

Tested: Built an image for the gb200nvl-obmc machine with the
following patches cherry-picked. These patches are needed to enable
the MCTP stack.

https://gerrit.openbmc.org/c/openbmc/openbmc/+/79422

```
$ curl -s -k -u 'root:0penBmc' https://10.137.203.137/redfish/v1/Chassis/NVIDIA_GB200_1/Sensors/power_NVIDIA_GB200_GPU_0_Power_0
{
  "@odata.id": "/redfish/v1/Chassis/NVIDIA_GB200_1/Sensors/power_NVIDIA_GB200_GPU_0_Power_0",
  "@odata.type": "#Sensor.v1_2_0.Sensor",
  "Id": "power_NVIDIA_GB200_GPU_0_Power_0",
  "Name": "NVIDIA GB200 GPU 0 Power 0",
  "Reading": 27.181,
  "ReadingRangeMax": 4294967.295,
  "ReadingRangeMin": 0.0,
  "ReadingType": "Power",
  "ReadingUnits": "W",
  "Status": {
    "Health": "OK",
    "State": "Enabled"
  }
}
```
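
The sensor is also visible directly on D-Bus. A minimal check, assuming
the daemon registers the well-known bus name xyz.openbmc_project.GpuSensor
(adjust to the actual service name); the object path follows from the new
sensor code (sensorPathPrefix + "power/" + escaped sensor name), and the
reading matches the Redfish output above (27.181 W, i.e. a raw device
value of 27181 mW):

```
$ busctl get-property xyz.openbmc_project.GpuSensor \
    /xyz/openbmc_project/sensors/power/NVIDIA_GB200_GPU_0_Power_0 \
    xyz.openbmc_project.Sensor.Value Value
d 27.181
```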

Change-Id: Ic227a0056daa68ab2239a609ed20c7ed2f6bd2c5
Signed-off-by: Harshit Aghera <haghera@nvidia.com>
diff --git a/src/nvidia-gpu/NvidiaGpuDevice.cpp b/src/nvidia-gpu/NvidiaGpuDevice.cpp
index 509a353..dc30788 100644
--- a/src/nvidia-gpu/NvidiaGpuDevice.cpp
+++ b/src/nvidia-gpu/NvidiaGpuDevice.cpp
@@ -14,6 +14,7 @@
 #include <bits/basic_string.h>
 
 #include <MctpRequester.hpp>
+#include <NvidiaGpuPowerSensor.hpp>
 #include <NvidiaGpuThresholds.hpp>
 #include <boost/asio/io_context.hpp>
 #include <phosphor-logging/lg2.hpp>
@@ -56,6 +57,10 @@
         mctpRequester,
         std::bind_front(&GpuDevice::processTLimitThresholds, this));
 
+    powerSensor = std::make_shared<NvidiaGpuPowerSensor>(
+        conn, mctpRequester, name + "_Power_0", path, eid, gpuPowerSensorId,
+        objectServer, std::vector<thresholds::Threshold>{});
+
     lg2::info("Added GPU {NAME} Sensors with chassis path: {PATH}.", "NAME",
               name, "PATH", path);
 
@@ -92,6 +97,7 @@
     {
         tLimitSensor->update();
     }
+    powerSensor->update();
 
     waitTimer.expires_after(std::chrono::milliseconds(sensorPollMs));
     waitTimer.async_wait([this](const boost::system::error_code& ec) {
diff --git a/src/nvidia-gpu/NvidiaGpuDevice.hpp b/src/nvidia-gpu/NvidiaGpuDevice.hpp
index b9e0791..9bcf64e 100644
--- a/src/nvidia-gpu/NvidiaGpuDevice.hpp
+++ b/src/nvidia-gpu/NvidiaGpuDevice.hpp
@@ -8,6 +8,7 @@
 
 #include "MctpRequester.hpp"
 #include "NvidiaDeviceDiscovery.hpp"
+#include "NvidiaGpuPowerSensor.hpp"
 #include "NvidiaGpuSensor.hpp"
 
 #include <boost/asio/io_context.hpp>
@@ -58,6 +59,7 @@
 
     std::shared_ptr<NvidiaGpuTempSensor> tempSensor;
     std::shared_ptr<NvidiaGpuTempSensor> tLimitSensor;
+    std::shared_ptr<NvidiaGpuPowerSensor> powerSensor;
 
     SensorConfigs configs;
 
diff --git a/src/nvidia-gpu/NvidiaGpuMctpVdm.cpp b/src/nvidia-gpu/NvidiaGpuMctpVdm.cpp
index 7a48b30..2355bf9 100644
--- a/src/nvidia-gpu/NvidiaGpuMctpVdm.cpp
+++ b/src/nvidia-gpu/NvidiaGpuMctpVdm.cpp
@@ -217,5 +217,72 @@
 
     return 0;
 }
+
+int encodeGetCurrentPowerDrawRequest(uint8_t instanceId, uint8_t sensorId,
+                                     uint8_t averagingInterval,
+                                     std::span<uint8_t> buf)
+{
+    if (buf.size() < sizeof(GetCurrentPowerDrawRequest))
+    {
+        return EINVAL;
+    }
+
+    auto* msg = reinterpret_cast<GetCurrentPowerDrawRequest*>(buf.data());
+
+    ocp::accelerator_management::BindingPciVidInfo header{};
+    header.ocp_accelerator_management_msg_type =
+        static_cast<uint8_t>(ocp::accelerator_management::MessageType::REQUEST);
+    header.instance_id = instanceId &
+                         ocp::accelerator_management::instanceIdBitMask;
+    header.msg_type = static_cast<uint8_t>(MessageType::PLATFORM_ENVIRONMENTAL);
+
+    auto rc = packHeader(header, msg->hdr.msgHdr.hdr);
+
+    if (rc != 0)
+    {
+        return rc;
+    }
+
+    msg->hdr.command = static_cast<uint8_t>(
+        PlatformEnvironmentalCommands::GET_CURRENT_POWER_DRAW);
+    msg->hdr.data_size = sizeof(sensorId) + sizeof(averagingInterval);
+    msg->sensorId = sensorId;
+    msg->averagingInterval = averagingInterval;
+
+    return 0;
+}
+
+int decodeGetCurrentPowerDrawResponse(
+    std::span<const uint8_t> buf,
+    ocp::accelerator_management::CompletionCode& cc, uint16_t& reasonCode,
+    uint32_t& power)
+{
+    auto rc =
+        ocp::accelerator_management::decodeReasonCodeAndCC(buf, cc, reasonCode);
+
+    if (rc != 0 || cc != ocp::accelerator_management::CompletionCode::SUCCESS)
+    {
+        return rc;
+    }
+
+    if (buf.size() < sizeof(GetCurrentPowerDrawResponse))
+    {
+        return EINVAL;
+    }
+
+    const auto* response =
+        reinterpret_cast<const GetCurrentPowerDrawResponse*>(buf.data());
+
+    const uint16_t dataSize = le16toh(response->hdr.data_size);
+
+    if (dataSize != sizeof(uint32_t))
+    {
+        return EINVAL;
+    }
+
+    power = le32toh(response->power);
+
+    return 0;
+}
 // NOLINTEND(cppcoreguidelines-pro-type-reinterpret-cast)
 } // namespace gpu
diff --git a/src/nvidia-gpu/NvidiaGpuMctpVdm.hpp b/src/nvidia-gpu/NvidiaGpuMctpVdm.hpp
index f7c78b8..c7f7511 100644
--- a/src/nvidia-gpu/NvidiaGpuMctpVdm.hpp
+++ b/src/nvidia-gpu/NvidiaGpuMctpVdm.hpp
@@ -31,6 +31,7 @@
 {
     GET_TEMPERATURE_READING = 0x00,
     READ_THERMAL_PARAMETERS = 0x02,
+    GET_CURRENT_POWER_DRAW = 0x03,
 };
 
 enum class DeviceIdentification : uint8_t
@@ -60,6 +61,13 @@
 
 using ReadThermalParametersRequest = GetNumericSensorReadingRequest;
 
+struct GetCurrentPowerDrawRequest
+{
+    ocp::accelerator_management::CommonRequest hdr;
+    uint8_t sensorId;
+    uint8_t averagingInterval;
+} __attribute__((packed));
+
 struct GetTemperatureReadingResponse
 {
     ocp::accelerator_management::CommonResponse hdr;
@@ -72,6 +80,12 @@
     int32_t threshold;
 } __attribute__((packed));
 
+struct GetCurrentPowerDrawResponse
+{
+    ocp::accelerator_management::CommonResponse hdr;
+    uint32_t power;
+} __attribute__((packed));
+
 int packHeader(const ocp::accelerator_management::BindingPciVidInfo& hdr,
                ocp::accelerator_management::BindingPciVid& msg);
 
@@ -99,4 +113,12 @@
     ocp::accelerator_management::CompletionCode& cc, uint16_t& reasonCode,
     int32_t& threshold);
 
+int encodeGetCurrentPowerDrawRequest(uint8_t instanceId, uint8_t sensorId,
+                                     uint8_t averagingInterval,
+                                     std::span<uint8_t> buf);
+
+int decodeGetCurrentPowerDrawResponse(
+    std::span<const uint8_t> buf,
+    ocp::accelerator_management::CompletionCode& cc, uint16_t& reasonCode,
+    uint32_t& power);
 } // namespace gpu
diff --git a/src/nvidia-gpu/NvidiaGpuPowerSensor.cpp b/src/nvidia-gpu/NvidiaGpuPowerSensor.cpp
new file mode 100644
index 0000000..ffec3ad
--- /dev/null
+++ b/src/nvidia-gpu/NvidiaGpuPowerSensor.cpp
@@ -0,0 +1,135 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION &
+ * AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include "NvidiaGpuPowerSensor.hpp"
+
+#include "MctpRequester.hpp"
+#include "SensorPaths.hpp"
+#include "Thresholds.hpp"
+#include "Utils.hpp"
+#include "sensor.hpp"
+
+#include <bits/basic_string.h>
+
+#include <NvidiaDeviceDiscovery.hpp>
+#include <NvidiaGpuMctpVdm.hpp>
+#include <OcpMctpVdm.hpp>
+#include <phosphor-logging/lg2.hpp>
+#include <sdbusplus/asio/connection.hpp>
+#include <sdbusplus/asio/object_server.hpp>
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace std::literals;
+
+// GPU Power Sensor Averaging Interval in seconds, 0 implies default
+constexpr uint8_t gpuPowerAveragingIntervalInSec{0};
+
+static constexpr double gpuPowerSensorMaxReading =
+    std::numeric_limits<uint32_t>::max() / 1000.0;
+static constexpr double gpuPowerSensorMinReading =
+    std::numeric_limits<uint32_t>::min();
+
+NvidiaGpuPowerSensor::NvidiaGpuPowerSensor(
+    std::shared_ptr<sdbusplus::asio::connection>& conn,
+    mctp::MctpRequester& mctpRequester, const std::string& name,
+    const std::string& sensorConfiguration, uint8_t eid, uint8_t sensorId,
+    sdbusplus::asio::object_server& objectServer,
+    std::vector<thresholds::Threshold>&& thresholdData) :
+    Sensor(escapeName(name), std::move(thresholdData), sensorConfiguration,
+           "power", false, true, gpuPowerSensorMaxReading,
+           gpuPowerSensorMinReading, conn),
+    eid(eid), sensorId{sensorId},
+    averagingInterval{gpuPowerAveragingIntervalInSec},
+    mctpRequester(mctpRequester), objectServer(objectServer)
+
+{
+    std::string dbusPath = sensorPathPrefix + "power/"s + escapeName(name);
+
+    sensorInterface = objectServer.add_interface(
+        dbusPath, "xyz.openbmc_project.Sensor.Value");
+
+    for (const auto& threshold : thresholds)
+    {
+        std::string interface = thresholds::getInterface(threshold.level);
+        thresholdInterfaces[static_cast<size_t>(threshold.level)] =
+            objectServer.add_interface(dbusPath, interface);
+    }
+
+    association = objectServer.add_interface(dbusPath, association::interface);
+
+    setInitialProperties(sensor_paths::unitWatts);
+}
+
+NvidiaGpuPowerSensor::~NvidiaGpuPowerSensor()
+{
+    for (const auto& iface : thresholdInterfaces)
+    {
+        objectServer.remove_interface(iface);
+    }
+    objectServer.remove_interface(association);
+    objectServer.remove_interface(sensorInterface);
+}
+
+void NvidiaGpuPowerSensor::checkThresholds()
+{
+    thresholds::checkThresholds(this);
+}
+
+void NvidiaGpuPowerSensor::processResponse(int sendRecvMsgResult)
+{
+    if (sendRecvMsgResult != 0)
+    {
+        lg2::error(
+            "Error updating Power Sensor for eid {EID} and sensor id {SID} : sending message over MCTP failed, rc={RC}",
+            "EID", eid, "SID", sensorId, "RC", sendRecvMsgResult);
+        return;
+    }
+
+    ocp::accelerator_management::CompletionCode cc{};
+    uint16_t reasonCode = 0;
+    uint32_t power = 0;
+
+    const int rc =
+        gpu::decodeGetCurrentPowerDrawResponse(response, cc, reasonCode, power);
+
+    if (rc != 0 || cc != ocp::accelerator_management::CompletionCode::SUCCESS)
+    {
+        lg2::error(
+            "Error updating Power Sensor eid {EID} and sensor id {SID} : decode failed, rc={RC}, cc={CC}, reasonCode={RESC}",
+            "EID", eid, "SID", sensorId, "RC", rc, "CC", cc, "RESC",
+            reasonCode);
+        return;
+    }
+
+    // Reading from the device is in milliwatts and unit set on the dbus
+    // is watts.
+    updateValue(power / 1000.0);
+}
+
+void NvidiaGpuPowerSensor::update()
+{
+    const int rc = gpu::encodeGetCurrentPowerDrawRequest(
+        0, sensorId, averagingInterval, request);
+
+    if (rc != 0)
+    {
+        lg2::error(
+            "Error updating Temperature Sensor for eid {EID} and sensor id {SID} : encode failed, rc={RC}",
+            "EID", eid, "SID", sensorId, "RC", rc);
+    }
+
+    mctpRequester.sendRecvMsg(
+        eid, request, response,
+        [this](int sendRecvMsgResult) { processResponse(sendRecvMsgResult); });
+}
diff --git a/src/nvidia-gpu/NvidiaGpuPowerSensor.hpp b/src/nvidia-gpu/NvidiaGpuPowerSensor.hpp
new file mode 100644
index 0000000..89b0ab4
--- /dev/null
+++ b/src/nvidia-gpu/NvidiaGpuPowerSensor.hpp
@@ -0,0 +1,59 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION &
+ * AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include "MctpRequester.hpp"
+#include "Thresholds.hpp"
+#include "sensor.hpp"
+
+#include <NvidiaGpuMctpVdm.hpp>
+#include <sdbusplus/asio/connection.hpp>
+#include <sdbusplus/asio/object_server.hpp>
+
+#include <array>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+constexpr uint8_t gpuPowerSensorId{0};
+
+struct NvidiaGpuPowerSensor : public Sensor
+{
+  public:
+    NvidiaGpuPowerSensor(
+        std::shared_ptr<sdbusplus::asio::connection>& conn,
+        mctp::MctpRequester& mctpRequester, const std::string& name,
+        const std::string& sensorConfiguration, uint8_t eid, uint8_t sensorId,
+        sdbusplus::asio::object_server& objectServer,
+        std::vector<thresholds::Threshold>&& thresholdData);
+
+    ~NvidiaGpuPowerSensor() override;
+
+    void checkThresholds() override;
+
+    void update();
+
+  private:
+    void processResponse(int sendRecvMsgResult);
+
+    uint8_t eid{};
+
+    uint8_t sensorId;
+
+    uint8_t averagingInterval;
+
+    std::shared_ptr<sdbusplus::asio::connection> conn;
+
+    mctp::MctpRequester& mctpRequester;
+
+    sdbusplus::asio::object_server& objectServer;
+
+    std::array<uint8_t, sizeof(gpu::GetCurrentPowerDrawRequest)> request{};
+
+    std::array<uint8_t, sizeof(gpu::GetCurrentPowerDrawResponse)> response{};
+};
diff --git a/src/nvidia-gpu/meson.build b/src/nvidia-gpu/meson.build
index 4044eb3..2e275fc 100644
--- a/src/nvidia-gpu/meson.build
+++ b/src/nvidia-gpu/meson.build
@@ -3,6 +3,7 @@
     'NvidiaDeviceDiscovery.cpp',
     'NvidiaGpuDevice.cpp',
     'NvidiaGpuMctpVdm.cpp',
+    'NvidiaGpuPowerSensor.cpp',
     'NvidiaGpuSensor.cpp',
     'NvidiaGpuSensorMain.cpp',
     'NvidiaGpuThresholds.cpp',
diff --git a/src/nvidia-gpu/tests/NvidiaGpuSensorTest.cpp b/src/nvidia-gpu/tests/NvidiaGpuSensorTest.cpp
index 9455326..e319ebc 100644
--- a/src/nvidia-gpu/tests/NvidiaGpuSensorTest.cpp
+++ b/src/nvidia-gpu/tests/NvidiaGpuSensorTest.cpp
@@ -641,6 +641,160 @@
 
     EXPECT_EQ(result, EINVAL); // Should indicate error for invalid data size
 }
+
+// Tests for GpuMctpVdm::encodeGetCurrentPowerDrawRequest function
+TEST_F(GpuMctpVdmTests, EncodeGetCurrentPowerDrawRequestSuccess)
+{
+    const uint8_t instanceId = 6;
+    const uint8_t sensorId = 2;
+    const uint8_t averagingInterval = 10;
+    std::array<uint8_t, sizeof(gpu::GetCurrentPowerDrawRequest)> buf{};
+
+    int result = gpu::encodeGetCurrentPowerDrawRequest(instanceId, sensorId,
+                                                       averagingInterval, buf);
+
+    EXPECT_EQ(result, 0);
+
+    gpu::GetCurrentPowerDrawRequest request{};
+    std::memcpy(&request, buf.data(), sizeof(request));
+
+    EXPECT_EQ(request.hdr.msgHdr.hdr.pci_vendor_id,
+              htobe16(gpu::nvidiaPciVendorId));
+    EXPECT_EQ(request.hdr.msgHdr.hdr.instance_id &
+                  ocp::accelerator_management::instanceIdBitMask,
+              instanceId & ocp::accelerator_management::instanceIdBitMask);
+    EXPECT_NE(request.hdr.msgHdr.hdr.instance_id &
+                  ocp::accelerator_management::requestBitMask,
+              0);
+    EXPECT_EQ(request.hdr.msgHdr.hdr.ocp_accelerator_management_msg_type,
+              static_cast<uint8_t>(gpu::MessageType::PLATFORM_ENVIRONMENTAL));
+
+    // Verify request data
+    EXPECT_EQ(request.hdr.command,
+              static_cast<uint8_t>(
+                  gpu::PlatformEnvironmentalCommands::GET_CURRENT_POWER_DRAW));
+    EXPECT_EQ(request.hdr.data_size,
+              sizeof(sensorId) + sizeof(averagingInterval));
+    EXPECT_EQ(request.sensorId, sensorId);
+    EXPECT_EQ(request.averagingInterval, averagingInterval);
+}
+
+// Tests for GpuMctpVdm::decodeGetCurrentPowerDrawResponse function
+TEST_F(GpuMctpVdmTests, DecodeGetCurrentPowerDrawResponseSuccess)
+{
+    // Create a mock successful response
+    std::array<uint8_t, sizeof(gpu::GetCurrentPowerDrawResponse)> buf{};
+
+    gpu::GetCurrentPowerDrawResponse response{};
+    ocp::accelerator_management::BindingPciVidInfo headerInfo{};
+    headerInfo.ocp_accelerator_management_msg_type = static_cast<uint8_t>(
+        ocp::accelerator_management::MessageType::RESPONSE);
+    headerInfo.instance_id = 6;
+    headerInfo.msg_type =
+        static_cast<uint8_t>(gpu::MessageType::PLATFORM_ENVIRONMENTAL);
+
+    gpu::packHeader(headerInfo, response.hdr.msgHdr.hdr);
+
+    // Populate response data
+    response.hdr.command = static_cast<uint8_t>(
+        gpu::PlatformEnvironmentalCommands::GET_CURRENT_POWER_DRAW);
+    response.hdr.completion_code = static_cast<uint8_t>(
+        ocp::accelerator_management::CompletionCode::SUCCESS);
+    response.hdr.reserved = 0;
+    response.hdr.data_size = htole16(sizeof(uint32_t));
+
+    // Set a power value of 250W
+    response.power = htole32(250);
+
+    std::memcpy(buf.data(), &response, sizeof(response));
+
+    // Test decoding
+    ocp::accelerator_management::CompletionCode cc{};
+    uint16_t reasonCode{};
+    uint32_t power{};
+
+    int result =
+        gpu::decodeGetCurrentPowerDrawResponse(buf, cc, reasonCode, power);
+
+    EXPECT_EQ(result, 0);
+    EXPECT_EQ(cc, ocp::accelerator_management::CompletionCode::SUCCESS);
+    EXPECT_EQ(reasonCode, 0);
+    EXPECT_EQ(power, 250);
+}
+
+TEST_F(GpuMctpVdmTests, DecodeGetCurrentPowerDrawResponseError)
+{
+    std::array<uint8_t,
+               sizeof(ocp::accelerator_management::CommonNonSuccessResponse)>
+        buf{};
+
+    // Populate error response data
+    ocp::accelerator_management::CommonNonSuccessResponse errorResponse{};
+    ocp::accelerator_management::BindingPciVidInfo headerInfo{};
+    headerInfo.ocp_accelerator_management_msg_type = static_cast<uint8_t>(
+        ocp::accelerator_management::MessageType::RESPONSE);
+    headerInfo.instance_id = 6;
+    headerInfo.msg_type =
+        static_cast<uint8_t>(gpu::MessageType::PLATFORM_ENVIRONMENTAL);
+
+    gpu::packHeader(headerInfo, errorResponse.msgHdr.hdr);
+
+    errorResponse.command = static_cast<uint8_t>(
+        gpu::PlatformEnvironmentalCommands::GET_CURRENT_POWER_DRAW);
+    errorResponse.completion_code = static_cast<uint8_t>(
+        ocp::accelerator_management::CompletionCode::ERR_NOT_READY);
+    errorResponse.reason_code = htole16(0x9ABC);
+
+    std::memcpy(buf.data(), &errorResponse, sizeof(errorResponse));
+
+    // Test decoding
+    ocp::accelerator_management::CompletionCode cc{};
+    uint16_t reasonCode{};
+    uint32_t power{};
+
+    int result =
+        gpu::decodeGetCurrentPowerDrawResponse(buf, cc, reasonCode, power);
+
+    EXPECT_EQ(result, 0);
+    EXPECT_EQ(cc, ocp::accelerator_management::CompletionCode::ERR_NOT_READY);
+    EXPECT_EQ(reasonCode, 0x9ABC);
+}
+
+TEST_F(GpuMctpVdmTests, DecodeGetCurrentPowerDrawResponseInvalidSize)
+{
+    // Create a mock response with invalid data_size
+    std::array<uint8_t, sizeof(gpu::GetCurrentPowerDrawResponse)> buf{};
+
+    gpu::GetCurrentPowerDrawResponse response{};
+    ocp::accelerator_management::BindingPciVidInfo headerInfo{};
+    headerInfo.ocp_accelerator_management_msg_type = static_cast<uint8_t>(
+        ocp::accelerator_management::MessageType::RESPONSE);
+    headerInfo.instance_id = 6;
+    headerInfo.msg_type =
+        static_cast<uint8_t>(gpu::MessageType::PLATFORM_ENVIRONMENTAL);
+
+    gpu::packHeader(headerInfo, response.hdr.msgHdr.hdr);
+
+    response.hdr.command = static_cast<uint8_t>(
+        gpu::PlatformEnvironmentalCommands::GET_CURRENT_POWER_DRAW);
+    response.hdr.completion_code = static_cast<uint8_t>(
+        ocp::accelerator_management::CompletionCode::SUCCESS);
+    response.hdr.reserved = 0;
+    response.hdr.data_size = htole16(2); // Invalid - should be sizeof(uint32_t)
+    response.power = htole32(250);
+
+    std::memcpy(buf.data(), &response, sizeof(response));
+
+    // Test decoding
+    ocp::accelerator_management::CompletionCode cc{};
+    uint16_t reasonCode{};
+    uint32_t power{};
+
+    int result =
+        gpu::decodeGetCurrentPowerDrawResponse(buf, cc, reasonCode, power);
+
+    EXPECT_EQ(result, EINVAL); // Should indicate error for invalid data size
+}
 } // namespace gpu_mctp_tests
 
 int main(int argc, char** argv)