| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: Apache-2.0 |
| 2 | // SPDX-FileCopyrightText: Copyright OpenBMC Authors |
| 3 | #include <stdio.h> |
| 4 | #include <stdlib.h> |
| 5 | #include <stddef.h> |
| 6 | #include <string.h> |
| 7 | #include <json.h> |
| 8 | #include <libcper/Cper.h> |
| 9 | #include <libcper/cper-utils.h> |
| 10 | #include <libcper/sections/cper-section-nvidia-events.h> |
| 11 | #include <libcper/log.h> |
| 12 | #include <string.h> |
| 13 | |
// NVIDIA Event Section GUID
// {9068e568-6ca0-11f0-aeaf-159343591eac}
EFI_GUID gEfiNvidiaEventErrorSectionGuid = { 0x9068e568,
					     0x6ca0,
					     0x11f0,
					     { 0xae, 0xaf, 0x15, 0x93, 0x43,
					       0x59, 0x1e, 0xac } };
| 20 | |
| 21 | /** |
| 22 | * NVIDIA Event Binary Structure Layout: |
| 23 | * |
| 24 | * The NVIDIA event CPER section has the following binary memory layout: |
| 25 | * |
| 26 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 27 | * │ EFI_NVIDIA_EVENT_HEADER (32 bytes) │ |
| 28 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 29 | * │ CHAR8 EventVersion │ |
| 30 | * │ CHAR8 EventContextCount ← Number of contexts that follow │ |
| 31 | * │ CHAR8 SourceDeviceType │ |
| 32 | * │ CHAR8 Reserved1 │ |
| 33 | * │ UINT16 EventType │ |
| 34 | * │ UINT16 EventSubtype │ |
| 35 | * │ UINT64 EventLinkId │ |
| 36 | * │ CHAR8 Signature[16] │ |
| 37 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 38 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 39 | * │ EFI_NVIDIA_EVENT_INFO_HEADER (3 bytes) │ |
| 40 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 41 | * │ UINT16 InfoVersion │ |
| 42 | * │ UINT8 InfoSize ← Total size (header + device data) │ |
| 43 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 44 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 45 | * │ Device-Specific Event Info (InfoSize - INFO_HEADER_SIZE bytes) │ |
| 46 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 47 | * │ e.g., EFI_NVIDIA_CPU_EVENT_INFO (29 bytes) │ |
| 48 | * │ UINT8 SocketNum │ |
| 49 | * │ UINT32 Architecture │ |
| 50 | * │ UINT32 Ecid[4] │ |
| 51 | * │ UINT64 InstanceBase │ |
| 52 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 53 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 54 | * │ EFI_NVIDIA_EVENT_CTX_HEADER (Context 0) (16 bytes) │ |
| 55 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 56 | * │ UINT32 CtxSize ← Total size of this context │ |
| 57 | * │ UINT16 CtxVersion │ |
| 58 | * │ UINT16 Reserved1 │ |
| 59 | * │ UINT16 DataFormatType ← OPAQUE(0)/TYPE_1(1)/TYPE_2(2)/etc. │ |
| 60 | * │ UINT16 DataFormatVersion │ |
| 61 | * │ UINT32 DataSize ← Size of Data[] array below │ |
| 62 | * │ UINT8 Data[0] ← Flexible array member │ |
| 63 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 64 | * │ Context Data[] (DataSize bytes) │ |
| 65 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 66 | * │ TYPE_1: Array of EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1 (16 bytes each) │ |
| 67 | * │ UINT64 Key │ |
| 68 | * │ UINT64 Value │ |
| 69 | * │ │ |
| 70 | * │ TYPE_2: Array of EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2 (8 bytes each) │ |
| 71 | * │ UINT32 Key │ |
| 72 | * │ UINT32 Value │ |
| 73 | * │ │ |
| 74 | * │ TYPE_3: Array of EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3 (8 bytes each) │ |
| 75 | * │ UINT64 Value │ |
| 76 | * │ │ |
| 77 | * │ TYPE_4: Array of EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4 (4 bytes each) │ |
| 78 | * │ UINT32 Value │ |
| 79 | * │ │ |
| 80 | * │ OPAQUE: Device-specific binary format │ |
| 81 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 82 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 83 | * │ PADDING (if needed) (align to 16-byte boundary) │ |
| 84 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 85 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
 * │ EFI_NVIDIA_EVENT_CTX_HEADER (Context 1)                  (16 bytes)     │
| 87 | * │ ... (same structure as Context 0) │ |
| 88 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 89 | * ... repeat for EventContextCount total contexts ... |
| 90 | * |
| 91 | * Note: Each context is padded to 16-byte alignment before the next context begins. |
| 92 | */ |
| 93 | |
| 94 | /** |
| 95 | * NVIDIA Event JSON IR Structure: |
| 96 | * |
| 97 | * Maps binary structures (above) to JSON using the field name constants (below). |
| 98 | * |
| 99 | * { |
| 100 | * "eventHeader": { ... } → EFI_NVIDIA_EVENT_HEADER |
| 101 | * "eventInfo": { ... } → EFI_NVIDIA_EVENT_INFO_* |
| 102 | * "eventContexts": [ → Array of contexts ("eventContext"*) |
| 103 | * { |
| 104 | * "data": { → EFI_NVIDIA_EVENT_CTX_DATA_* |
| 105 | * "keyValArray64": [ ... ] → TYPE_1 (16 bytes each: key64, val64) |
| 106 | * "keyValArray32": [ ... ] → TYPE_2 ( 8 bytes each: key32, val32) |
| 107 | * "valArray64": [ ... ] → TYPE_3 ( 8 bytes each: val64) |
| 108 | * "valArray32": [ ... ] → TYPE_4 ( 4 bytes each: val32) |
| 109 | * } |
| 110 | * }, |
| 111 | * { ... } |
| 112 | * ] |
| 113 | * } |
| 114 | */ |
| 115 | |
| 116 | // ============================================================================ |
| 117 | // Enums |
// Context data format types (see DataFormatType in the context header layout
// above). Values 0-4 are common formats shared by all devices; values at or
// above 0x8000 are device-specific payload formats handled by the registered
// context callbacks.
typedef enum {
	OPAQUE = 0, // Device-specific binary format
	TYPE_1 = 1, // Array of { UINT64 Key; UINT64 Value; } (16 bytes each)
	TYPE_2 = 2, // Array of { UINT32 Key; UINT32 Value; } (8 bytes each)
	TYPE_3 = 3, // Array of { UINT64 Value; } (8 bytes each)
	TYPE_4 = 4, // Array of { UINT32 Value; } (4 bytes each)
	// GPU-specific context data types
	GPU_INIT_METADATA = 0x8000,
	GPU_EVENT_LEGACY_XID = 0x8001,
	GPU_RECOMMENDED_ACTIONS = 0x8002
} NVIDIA_EVENT_CTX_DATA_TYPE;
| 129 | |
// Source device types; used to select the matching entry in the
// nv_event_types / event_ctx_handlers callback tables.
typedef enum {
	CPU = 0,
	GPU = 1,
	DPU = 2,
	NIC = 3,
	SWX = 4,
	BMC = 5
} NVIDIA_EVENT_SRC_DEV;
| 138 | |
| 139 | // Callback structures |
// Dispatch entry for device-specific event info conversion.
typedef struct {
	NVIDIA_EVENT_SRC_DEV srcDev; // Device type this entry handles
	UINT8 major_version; // Expected major version for this handler
	UINT8 minor_version; // Expected minor version for this handler
	// Binary -> JSON IR converter for this device's event info
	void (*callback)(EFI_NVIDIA_EVENT_HEADER *, json_object *);
	// JSON IR -> binary converter; returns number of bytes written
	size_t (*callback_bin)(json_object *, FILE *);
	size_t info_size; // sizeof the device-specific event info struct
} NV_EVENT_INFO_CALLBACKS;
| 148 | |
// Dispatch entry for device-specific (opaque) context data conversion.
typedef struct {
	NVIDIA_EVENT_SRC_DEV srcDev; // Device type this entry handles
	NVIDIA_EVENT_CTX_DATA_TYPE dataFormatType; // Format this entry handles
	// Binary -> JSON IR: (event_header, total_event_size, ctx_instance, out)
	void (*callback)(EFI_NVIDIA_EVENT_HEADER *, size_t, size_t,
			 json_object *);
	// JSON IR -> binary: (event_ir, ctx_instance, out); returns bytes written
	size_t (*callback_bin)(json_object *, size_t, FILE *);
} NV_EVENT_CTX_CALLBACKS;
| 156 | |
| 157 | // Helper functions |
| 158 | // CPU info formatters |
| 159 | static void parse_cpu_info_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 160 | json_object *event_info_ir); |
| 161 | static size_t parse_cpu_info_to_bin(json_object *event_info_ir, FILE *out); |
| 162 | |
| 163 | // GPU info formatters |
| 164 | static void parse_gpu_info_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 165 | json_object *event_info_ir); |
| 166 | static size_t parse_gpu_info_to_bin(json_object *event_info_ir, FILE *out); |
| 167 | |
| 168 | // GPU context data formatters |
| 169 | static void parse_gpu_ctx_metadata_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 170 | size_t total_event_size, |
| 171 | size_t ctx_instance, |
| 172 | json_object *output_data_ir); |
| 173 | static size_t parse_gpu_ctx_metadata_to_bin(json_object *event_ir, |
| 174 | size_t ctx_instance, |
| 175 | FILE *output_file_stream); |
| 176 | static void |
| 177 | parse_gpu_ctx_legacy_xid_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 178 | size_t total_event_size, size_t ctx_instance, |
| 179 | json_object *output_data_ir); |
| 180 | static size_t parse_gpu_ctx_legacy_xid_to_bin(json_object *event_ir, |
| 181 | size_t ctx_instance, |
| 182 | FILE *output_file_stream); |
| 183 | static void parse_gpu_ctx_recommended_actions_to_ir( |
| 184 | EFI_NVIDIA_EVENT_HEADER *event_header, size_t total_event_size, |
| 185 | size_t ctx_instance, json_object *output_data_ir); |
| 186 | static size_t parse_gpu_ctx_recommended_actions_to_bin( |
| 187 | json_object *event_ir, size_t ctx_instance, FILE *output_file_stream); |
| 188 | |
| 189 | // Common context data type0 formatters |
| 190 | static void parse_common_ctx_type0_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 191 | size_t total_event_size, |
| 192 | size_t ctx_instance, |
| 193 | json_object *output_data_ir); |
| 194 | static size_t parse_common_ctx_type0_to_bin(json_object *event_ir, |
| 195 | size_t ctx_instance, |
| 196 | FILE *output_file_stream); |
| 197 | |
| 198 | // Common context data type1 formatters |
| 199 | static void parse_common_ctx_type1_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 200 | size_t total_event_size, |
| 201 | size_t ctx_instance, |
| 202 | json_object *output_data_ir); |
| 203 | static size_t parse_common_ctx_type1_to_bin(json_object *event_ir, |
| 204 | size_t ctx_instance, |
| 205 | FILE *output_file_stream); |
| 206 | |
| 207 | // Common context data type2 formatters |
| 208 | static void parse_common_ctx_type2_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 209 | size_t total_event_size, |
| 210 | size_t ctx_instance, |
| 211 | json_object *output_data_ir); |
| 212 | static size_t parse_common_ctx_type2_to_bin(json_object *event_ir, |
| 213 | size_t ctx_instance, |
| 214 | FILE *output_file_stream); |
| 215 | |
| 216 | // Common context data type3 formatters |
| 217 | static void parse_common_ctx_type3_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 218 | size_t total_event_size, |
| 219 | size_t ctx_instance, |
| 220 | json_object *output_data_ir); |
| 221 | static size_t parse_common_ctx_type3_to_bin(json_object *event_ir, |
| 222 | size_t ctx_instance, |
| 223 | FILE *output_file_stream); |
| 224 | |
| 225 | // Common context data type4 formatters |
| 226 | static void parse_common_ctx_type4_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 227 | size_t total_event_size, |
| 228 | size_t ctx_instance, |
| 229 | json_object *output_data_ir); |
| 230 | static size_t parse_common_ctx_type4_to_bin(json_object *event_ir, |
| 231 | size_t ctx_instance, |
| 232 | FILE *output_file_stream); |
| 233 | |
| 234 | // Helper: Get pointer to device-specific event info (after headers) |
| 235 | static inline void *get_event_info(EFI_NVIDIA_EVENT_HEADER *header) |
| 236 | { |
| 237 | return (UINT8 *)header + sizeof(EFI_NVIDIA_EVENT_HEADER) + |
| 238 | sizeof(EFI_NVIDIA_EVENT_INFO_HEADER); |
| 239 | } |
| 240 | |
| 241 | // Helper: Get pointer to event info header (after event header) |
| 242 | static inline EFI_NVIDIA_EVENT_INFO_HEADER * |
| 243 | get_event_info_header(EFI_NVIDIA_EVENT_HEADER *header) |
| 244 | { |
| 245 | return (EFI_NVIDIA_EVENT_INFO_HEADER *)((UINT8 *)header + |
| 246 | sizeof(EFI_NVIDIA_EVENT_HEADER)); |
| 247 | } |
| 248 | |
| 249 | // Helper: Extract major version from event info header (high byte) |
| 250 | static inline UINT8 get_info_major_version(EFI_NVIDIA_EVENT_INFO_HEADER *header) |
| 251 | { |
| 252 | return (UINT8)((header->InfoVersion >> 8) & 0xFF); |
| 253 | } |
| 254 | |
| 255 | // Helper: Extract minor version from event info header (low byte) |
| 256 | static inline UINT8 get_info_minor_version(EFI_NVIDIA_EVENT_INFO_HEADER *header) |
| 257 | { |
| 258 | return (UINT8)(header->InfoVersion & 0xFF); |
| 259 | } |
| 260 | |
| 261 | // Helper: Check if info major version matches - returns false and logs on mismatch |
| 262 | static bool check_info_major_version(UINT8 maj, UINT8 min, UINT8 exp_maj, |
| 263 | const char *operation) |
| 264 | { |
| 265 | if (maj != exp_maj) { |
| 266 | cper_print_log( |
| 267 | "Error: NVIDIA Event Info major version mismatch: " |
| 268 | "expected %d.x, got %d.%d. Skipping event info %s.\n", |
| 269 | (int)exp_maj, (int)maj, (int)min, operation); |
| 270 | return false; |
| 271 | } |
| 272 | return true; |
| 273 | } |
| 274 | |
| 275 | // Helper: Check if event header version matches - returns false and logs on mismatch |
| 276 | static bool check_event_header_version(UINT16 ver, UINT16 exp_ver, |
| 277 | const char *operation) |
| 278 | { |
| 279 | if (ver != exp_ver) { |
| 280 | cper_print_log("Error: NVIDIA Event Header version mismatch: " |
| 281 | "expected %d, got %d. Skipping event %s.\n", |
| 282 | (int)exp_ver, (int)ver, operation); |
| 283 | return false; |
| 284 | } |
| 285 | return true; |
| 286 | } |
| 287 | |
// Helper: Write zero bytes to `out` so that bytes_written is rounded up to
// the next 16-byte boundary. Writes nothing when already aligned.
static void write_padding_to_16_byte_alignment(size_t bytes_written, FILE *out)
{
	size_t remainder = bytes_written & 0xF;
	if (remainder != 0) {
		static const unsigned char pad[16] = { 0 };
		fwrite(pad, 1, 16 - remainder, out);
	}
}
| 297 | |
| 298 | // Event info handler callbacks for different device types. |
| 299 | // Note: The _to_bin callbacks should return the number of bytes written. |
| 300 | // The caller is responsible for adding 16-byte alignment padding. |
NV_EVENT_INFO_CALLBACKS nv_event_types[] = {
	// { srcDev, major, minor, to_ir callback, to_bin callback, info_size }
	{ CPU, EFI_NVIDIA_CPU_EVENT_INFO_MAJ, EFI_NVIDIA_CPU_EVENT_INFO_MIN,
	  &parse_cpu_info_to_ir, &parse_cpu_info_to_bin,
	  sizeof(EFI_NVIDIA_CPU_EVENT_INFO) },
	{ GPU, EFI_NVIDIA_GPU_EVENT_INFO_MAJ, EFI_NVIDIA_GPU_EVENT_INFO_MIN,
	  &parse_gpu_info_to_ir, &parse_gpu_info_to_bin,
	  sizeof(EFI_NVIDIA_GPU_EVENT_INFO) }
};
| 309 | |
| 310 | // Event context handler callbacks for device-specific opaque data formats. |
| 311 | // This is where custom/opaque context data type handlers should be registered. |
| 312 | // Add entries here for device types that need special handling beyond the standard TYPE_1-4 formats. |
| 313 | // Note: The _to_bin callbacks should return the number of bytes written. |
| 314 | // The caller is responsible for adding 16-byte alignment padding. |
NV_EVENT_CTX_CALLBACKS event_ctx_handlers[] = {
	// { srcDev, dataFormatType, to_ir callback, to_bin callback }
	// GPU-specific context data handlers
	{ GPU, GPU_INIT_METADATA, &parse_gpu_ctx_metadata_to_ir,
	  &parse_gpu_ctx_metadata_to_bin },
	{ GPU, GPU_EVENT_LEGACY_XID, &parse_gpu_ctx_legacy_xid_to_ir,
	  &parse_gpu_ctx_legacy_xid_to_bin },
	{ GPU, GPU_RECOMMENDED_ACTIONS,
	  &parse_gpu_ctx_recommended_actions_to_ir,
	  &parse_gpu_ctx_recommended_actions_to_bin }
};
| 325 | |
// Retrieves a pointer to the nth event context within an NVIDIA event structure.
// Walks through the event header, event info, and variable-sized contexts with bounds checking.
// Returns NULL if the index is out of bounds or if buffer overflow would occur.
static inline EFI_NVIDIA_EVENT_CTX_HEADER *
get_event_context_n(EFI_NVIDIA_EVENT_HEADER *event_header, size_t n,
		    size_t total_size)
{
	UINT8 *start = (UINT8 *)event_header;
	UINT8 *ptr = start + sizeof(EFI_NVIDIA_EVENT_HEADER);
	UINT8 *end = start + total_size;

	// The info header itself must fit in the buffer.
	if (ptr + sizeof(EFI_NVIDIA_EVENT_INFO_HEADER) > end) {
		return NULL;
	}

	// InfoSize is the total info size (header + device-specific data);
	// skipping it lands on the first context.
	EFI_NVIDIA_EVENT_INFO_HEADER *info_header =
		(EFI_NVIDIA_EVENT_INFO_HEADER *)ptr;
	if (ptr + info_header->InfoSize > end) {
		return NULL;
	}
	ptr += info_header->InfoSize;
	// Skip the first n contexts, bounds-checking each header and body.
	// NOTE(review): advances by CtxSize only; the layout comment above
	// says each context is padded to a 16-byte boundary — confirm that
	// CtxSize includes that padding, otherwise this walk would drift.
	for (size_t i = 0; i < n; i++) {
		if (ptr + sizeof(EFI_NVIDIA_EVENT_CTX_HEADER) > end) {
			return NULL;
		}
		EFI_NVIDIA_EVENT_CTX_HEADER *ctx =
			(EFI_NVIDIA_EVENT_CTX_HEADER *)ptr;
		if (ptr + ctx->CtxSize > end) {
			return NULL;
		}
		ptr += ctx->CtxSize;
	}

	// The nth context's header must also fit before it can be returned.
	if (ptr + sizeof(EFI_NVIDIA_EVENT_CTX_HEADER) > end) {
		return NULL;
	}
	return (EFI_NVIDIA_EVENT_CTX_HEADER *)ptr;
}
| 364 | |
| 365 | // Gets the nth event context from a JSON IR Event object. |
| 366 | // Returns NULL if the eventContexts field doesn't exist, isn't an object, |
| 367 | // or if n is out of bounds. |
| 368 | static inline json_object *get_event_context_n_ir(json_object *event_ir, |
| 369 | size_t n) |
| 370 | { |
| 371 | if (event_ir == NULL) { |
| 372 | return NULL; |
| 373 | } |
| 374 | |
| 375 | // Get the eventContexts object |
| 376 | json_object *event_contexts_ir = |
| 377 | json_object_object_get(event_ir, "eventContexts"); |
| 378 | if (event_contexts_ir == NULL) { |
| 379 | return NULL; |
| 380 | } |
| 381 | |
| 382 | // Check if it's an array (preferred structure) |
| 383 | if (json_object_is_type(event_contexts_ir, json_type_array)) { |
| 384 | size_t array_len = json_object_array_length(event_contexts_ir); |
| 385 | if (n >= array_len) { |
| 386 | return NULL; |
| 387 | } |
| 388 | return json_object_array_get_idx(event_contexts_ir, n); |
| 389 | } |
| 390 | |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 391 | return NULL; |
| 392 | } |
| 393 | |
| 394 | // Gets the data object from the nth event context in a JSON IR Event object. |
| 395 | // Combines get_event_context_n_ir and extraction of the data field. |
| 396 | // Returns NULL if the context doesn't exist, is out of bounds, or has no data. |
| 397 | static inline json_object *get_event_context_n_data_ir(json_object *event_ir, |
| 398 | size_t n) |
| 399 | { |
| 400 | json_object *event_context_ir = get_event_context_n_ir(event_ir, n); |
| 401 | if (event_context_ir == NULL) { |
| 402 | return NULL; |
| 403 | } |
| 404 | |
| 405 | return json_object_object_get(event_context_ir, "data"); |
| 406 | } |
| 407 | |
| 408 | // Parses CPU-specific event info structure into JSON IR format. |
| 409 | // Extracts socket number, architecture, ECID array, and instance base. |
| 410 | /* |
| 411 | * Example JSON IR "data" output: |
| 412 | * { |
| 413 | * "SocketNum": 0, |
| 414 | * "Architecture": 2684420096, |
| 415 | * "Ecid1": 1234567890123456789, |
| 416 | * "Ecid2": 9876543210987654321, |
| 417 | * "Ecid3": 5555555555555555555, |
| 418 | * "Ecid4": 1111111111111111111, |
| 419 | * "InstanceBase": 281474976710656 |
| 420 | * } |
| 421 | */ |
| 422 | static void parse_cpu_info_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 423 | json_object *event_info_ir) |
| 424 | { |
| 425 | // Verify InfoSize is large enough for CPU event info |
| 426 | EFI_NVIDIA_EVENT_INFO_HEADER *info_header = |
| 427 | get_event_info_header(event_header); |
| 428 | size_t required_size = sizeof(EFI_NVIDIA_EVENT_INFO_HEADER) + |
| 429 | sizeof(EFI_NVIDIA_CPU_EVENT_INFO); |
| 430 | if (info_header->InfoSize < required_size) { |
| 431 | cper_print_log( |
| 432 | "Error: CPU event info size too small: got %d, need %zu\n", |
| 433 | info_header->InfoSize, required_size); |
| 434 | return; |
| 435 | } |
| 436 | |
| 437 | EFI_NVIDIA_CPU_EVENT_INFO *cpu_event_info = |
| 438 | (EFI_NVIDIA_CPU_EVENT_INFO *)get_event_info(event_header); |
| 439 | if (cpu_event_info == NULL) { |
| 440 | return; |
| 441 | } |
| 442 | |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 443 | add_uint(event_info_ir, "SocketNum", cpu_event_info->SocketNum); |
| 444 | add_uint(event_info_ir, "Architecture", cpu_event_info->Architecture); |
| 445 | add_uint(event_info_ir, "Ecid1", cpu_event_info->Ecid[0]); |
| 446 | add_uint(event_info_ir, "Ecid2", cpu_event_info->Ecid[1]); |
| 447 | add_uint(event_info_ir, "Ecid3", cpu_event_info->Ecid[2]); |
| 448 | add_uint(event_info_ir, "Ecid4", cpu_event_info->Ecid[3]); |
| 449 | add_uint(event_info_ir, "InstanceBase", cpu_event_info->InstanceBase); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 450 | } |
| 451 | // Converts CPU-specific event info from JSON IR to CPER binary format. |
| 452 | // Writes socket number, architecture, ECID array, and instance base. |
| 453 | // Returns the number of bytes written. |
| 454 | /* |
| 455 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 456 | * │ EFI_NVIDIA_CPU_EVENT_INFO (32 bytes) │ |
| 457 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 458 | * │ UINT8 SocketNum │ |
| 459 | * │ [padding - 3 bytes] │ |
| 460 | * │ UINT32 Architecture │ |
| 461 | * │ UINT32 Ecid[0] │ |
| 462 | * │ UINT32 Ecid[1] │ |
| 463 | * │ UINT32 Ecid[2] │ |
| 464 | * │ UINT32 Ecid[3] │ |
| 465 | * │ UINT64 InstanceBase │ |
| 466 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 467 | */ |
| 468 | static size_t parse_cpu_info_to_bin(json_object *event_info_ir, FILE *out) |
| 469 | { |
| 470 | EFI_NVIDIA_CPU_EVENT_INFO cpu_event_info = { 0 }; |
| 471 | cpu_event_info.SocketNum = json_object_get_int64( |
| 472 | json_object_object_get(event_info_ir, "SocketNum")); |
| 473 | cpu_event_info.Architecture = json_object_get_int64( |
| 474 | json_object_object_get(event_info_ir, "Architecture")); |
| 475 | cpu_event_info.Ecid[0] = json_object_get_uint64( |
| 476 | json_object_object_get(event_info_ir, "Ecid1")); |
| 477 | cpu_event_info.Ecid[1] = json_object_get_uint64( |
| 478 | json_object_object_get(event_info_ir, "Ecid2")); |
| 479 | cpu_event_info.Ecid[2] = json_object_get_uint64( |
| 480 | json_object_object_get(event_info_ir, "Ecid3")); |
| 481 | cpu_event_info.Ecid[3] = json_object_get_uint64( |
| 482 | json_object_object_get(event_info_ir, "Ecid4")); |
| 483 | cpu_event_info.InstanceBase = json_object_get_uint64( |
| 484 | json_object_object_get(event_info_ir, "InstanceBase")); |
| 485 | return fwrite(&cpu_event_info, 1, sizeof(EFI_NVIDIA_CPU_EVENT_INFO), |
| 486 | out); |
| 487 | } |
| 488 | |
| 489 | // Parses GPU-specific event info structure into JSON IR format. |
| 490 | // Extracts version, size, event originator, partitions, and PDI. |
| 491 | /* |
| 492 | * Example JSON IR "data" output: |
| 493 | * { |
| 494 | * "EventOriginator": 2, |
| 495 | * "SourcePartition": 1, |
| 496 | * "SourceSubPartition": 0, |
| 497 | * "Pdi": 9876543210987654321 |
| 498 | * } |
| 499 | */ |
| 500 | static void parse_gpu_info_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 501 | json_object *event_info_ir) |
| 502 | { |
| 503 | // Verify InfoSize is large enough for GPU event info |
| 504 | EFI_NVIDIA_EVENT_INFO_HEADER *info_header = |
| 505 | get_event_info_header(event_header); |
| 506 | size_t required_size = sizeof(EFI_NVIDIA_EVENT_INFO_HEADER) + |
| 507 | sizeof(EFI_NVIDIA_GPU_EVENT_INFO); |
| 508 | if (info_header->InfoSize < required_size) { |
| 509 | cper_print_log( |
| 510 | "Error: GPU event info size too small: got %d, need %zu\n", |
| 511 | info_header->InfoSize, required_size); |
| 512 | return; |
| 513 | } |
| 514 | |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 515 | EFI_NVIDIA_GPU_EVENT_INFO *info = |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 516 | (EFI_NVIDIA_GPU_EVENT_INFO *)get_event_info(event_header); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 517 | if (info == NULL) { |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 518 | return; |
| 519 | } |
| 520 | |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 521 | add_uint(event_info_ir, "EventOriginator", info->EventOriginator); |
| 522 | add_uint(event_info_ir, "SourcePartition", info->SourcePartition); |
| 523 | add_uint(event_info_ir, "SourceSubPartition", info->SourceSubPartition); |
| 524 | add_uint(event_info_ir, "Pdi", info->Pdi); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 525 | } |
| 526 | |
| 527 | // Converts GPU-specific event info from JSON IR to CPER binary format. |
| 528 | // Writes version, size, event originator, partitions, and PDI. |
| 529 | // Returns the number of bytes written. |
| 530 | /* |
| 531 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 532 | * │ EFI_NVIDIA_GPU_EVENT_INFO (16 bytes) │ |
| 533 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 534 | * │ UINT8 EventOriginator │ |
| 535 | * │ UINT16 SourcePartition │ |
| 536 | * │ UINT16 SourceSubPartition │ |
| 537 | * │ UINT64 Pdi │ |
| 538 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 539 | */ |
| 540 | static size_t parse_gpu_info_to_bin(json_object *event_info_ir, FILE *out) |
| 541 | { |
| 542 | EFI_NVIDIA_GPU_EVENT_INFO gpu_event_info = { 0 }; |
| 543 | |
| 544 | gpu_event_info.EventOriginator = json_object_get_uint64( |
| 545 | json_object_object_get(event_info_ir, "EventOriginator")); |
| 546 | gpu_event_info.SourcePartition = json_object_get_int64( |
| 547 | json_object_object_get(event_info_ir, "SourcePartition")); |
| 548 | gpu_event_info.SourceSubPartition = json_object_get_int64( |
| 549 | json_object_object_get(event_info_ir, "SourceSubPartition")); |
| 550 | gpu_event_info.Pdi = json_object_get_uint64( |
| 551 | json_object_object_get(event_info_ir, "Pdi")); |
| 552 | |
| 553 | return fwrite(&gpu_event_info, 1, sizeof(EFI_NVIDIA_GPU_EVENT_INFO), |
| 554 | out); |
| 555 | } |
| 556 | |
| 557 | // GPU Context Data Handlers |
| 558 | |
// Parses GPU Initialization Metadata (0x8000) context data to JSON IR.
| 560 | // Extracts device info, firmware versions, PCI info, etc. |
| 561 | /* |
| 562 | * Example JSON IR "data" output (numeric fields in decimal): |
| 563 | * { |
| 564 | * "deviceName": "NVIDIA H100 80GB HBM3", |
| 565 | * "firmwareVersion": "96.00.5B.00.01", |
| 566 | * "pfDriverMicrocodeVersion": "535.183.01", |
| 567 | * "pfDriverVersion": "535.183.01", |
| 568 | * "vfDriverVersion": "535.183.01", |
| 569 | * "configuration": 123456789012345, |
| 570 | * "pdi": 9876543210987654321, |
| 571 | * "architectureId": 2684420096, |
| 572 | * "hardwareInfoType": 0, |
| 573 | * "pciInfo": { |
| 574 | * "class": 3, |
| 575 | * "subclass": 2, |
| 576 | * "rev": 161, |
| 577 | * "vendorId": 4318, |
| 578 | * "deviceId": 8711, |
| 579 | * "subsystemVendorId": 4318, |
| 580 | * "subsystemId": 5145, |
| 581 | * "bar0Start": 3758096384, |
| 582 | * "bar0Size": 16777216, |
| 583 | * "bar1Start": 2415919104, |
| 584 | * "bar1Size": 536870912, |
| 585 | * "bar2Start": 2416128000, |
| 586 | * "bar2Size": 33554432 |
| 587 | * } |
| 588 | * } |
| 589 | */ |
| 590 | static void parse_gpu_ctx_metadata_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 591 | size_t total_event_size, |
| 592 | size_t ctx_instance, |
| 593 | json_object *output_data_ir) |
| 594 | { |
| 595 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 596 | event_header, ctx_instance, total_event_size); |
| 597 | if (ctx == NULL) { |
| 598 | return; |
| 599 | } |
| 600 | |
| 601 | EFI_NVIDIA_GPU_CTX_METADATA *metadata = |
| 602 | (EFI_NVIDIA_GPU_CTX_METADATA *)ctx->Data; |
| 603 | |
| 604 | // String fields - use json_object_new_string to stop at first null (no null padding in JSON) |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 605 | add_string(output_data_ir, "deviceName", metadata->DeviceName); |
| 606 | add_string(output_data_ir, "firmwareVersion", |
| 607 | metadata->FirmwareVersion); |
| 608 | add_string(output_data_ir, "pfDriverMicrocodeVersion", |
| 609 | metadata->PfDriverMicrocodeVersion); |
| 610 | add_string(output_data_ir, "pfDriverVersion", |
| 611 | metadata->PfDriverVersion); |
| 612 | add_string(output_data_ir, "vfDriverVersion", |
| 613 | metadata->VfDriverVersion); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 614 | |
| 615 | // Numeric fields |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 616 | add_uint(output_data_ir, "configuration", metadata->Configuration); |
| 617 | add_uint(output_data_ir, "pdi", metadata->Pdi); |
| 618 | add_int(output_data_ir, "architectureId", metadata->ArchitectureId); |
| 619 | add_int(output_data_ir, "hardwareInfoType", metadata->HardwareInfoType); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 620 | |
| 621 | // PCI Info (if HardwareInfoType == 0) |
| 622 | if (metadata->HardwareInfoType == 0) { |
| 623 | json_object *pci_info = json_object_new_object(); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 624 | add_int(pci_info, "class", metadata->PciInfo.Class); |
| 625 | add_int(pci_info, "subclass", metadata->PciInfo.Subclass); |
| 626 | add_int(pci_info, "rev", metadata->PciInfo.Rev); |
| 627 | add_int(pci_info, "vendorId", metadata->PciInfo.VendorId); |
| 628 | add_int(pci_info, "deviceId", metadata->PciInfo.DeviceId); |
| 629 | add_int(pci_info, "subsystemVendorId", |
| 630 | metadata->PciInfo.SubsystemVendorId); |
| 631 | add_int(pci_info, "subsystemId", metadata->PciInfo.SubsystemId); |
| 632 | add_uint(pci_info, "bar0Start", metadata->PciInfo.Bar0Start); |
| 633 | add_uint(pci_info, "bar0Size", metadata->PciInfo.Bar0Size); |
| 634 | add_uint(pci_info, "bar1Start", metadata->PciInfo.Bar1Start); |
| 635 | add_uint(pci_info, "bar1Size", metadata->PciInfo.Bar1Size); |
| 636 | add_uint(pci_info, "bar2Start", metadata->PciInfo.Bar2Start); |
| 637 | add_uint(pci_info, "bar2Size", metadata->PciInfo.Bar2Size); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 638 | json_object_object_add(output_data_ir, "pciInfo", pci_info); |
| 639 | } |
| 640 | } |
| 641 | |
| 642 | // Converts GPU Initialization Metadata from JSON IR to binary. |
| 643 | // Returns the number of bytes written. |
| 644 | /* |
| 645 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 646 | * │ EFI_NVIDIA_GPU_CTX_METADATA (192 bytes) │ |
| 647 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 648 | * │ CHAR8 DeviceName[48] │ |
| 649 | * │ CHAR8 FirmwareVersion[16] │ |
| 650 | * │ CHAR8 PfDriverMicrocodeVersion[16] │ |
| 651 | * │ CHAR8 PfDriverVersion[16] │ |
| 652 | * │ CHAR8 VfDriverVersion[16] │ |
| 653 | * │ UINT64 Configuration │ |
| 654 | * │ UINT64 Pdi │ |
| 655 | * │ UINT32 ArchitectureId │ |
| 656 | * │ UINT8 HardwareInfoType ← 0=PCI Info, 1-255=Reserved │ |
| 657 | * │ union (59 bytes): │ |
| 658 | * │ EFI_NVIDIA_GPU_CTX_METADATA_PCI_INFO PciInfo (when type = 0): │ |
| 659 | * │ UINT8 Class, Subclass, Rev │ |
| 660 | * │ UINT16 VendorId, DeviceId, SubsystemVendorId, SubsystemId │ |
| 661 | * │ UINT64 Bar0Start, Bar0Size, Bar1Start, Bar1Size, Bar2Start, ... │ |
| 662 | * │ UINT8 Reserved[59] ← for future hardware info types │ |
| 663 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 664 | */ |
| 665 | static size_t parse_gpu_ctx_metadata_to_bin(json_object *event_ir, |
| 666 | size_t ctx_instance, |
| 667 | FILE *output_file_stream) |
| 668 | { |
| 669 | json_object *event_context_data_ir = |
| 670 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 671 | if (event_context_data_ir == NULL) { |
| 672 | return 0; |
| 673 | } |
| 674 | |
| 675 | EFI_NVIDIA_GPU_CTX_METADATA metadata = { 0 }; |
| 676 | |
| 677 | // String fields - use memcpy with strnlen to avoid strncpy truncation warnings |
| 678 | const char *str; |
| 679 | str = json_object_get_string( |
| 680 | json_object_object_get(event_context_data_ir, "deviceName")); |
| 681 | if (str) { |
| 682 | memcpy(metadata.DeviceName, str, |
| 683 | strnlen(str, sizeof(metadata.DeviceName))); |
| 684 | } |
| 685 | |
| 686 | str = json_object_get_string(json_object_object_get( |
| 687 | event_context_data_ir, "firmwareVersion")); |
| 688 | if (str) { |
| 689 | memcpy(metadata.FirmwareVersion, str, |
| 690 | strnlen(str, sizeof(metadata.FirmwareVersion))); |
| 691 | } |
| 692 | |
| 693 | str = json_object_get_string(json_object_object_get( |
| 694 | event_context_data_ir, "pfDriverMicrocodeVersion")); |
| 695 | if (str) { |
| 696 | memcpy(metadata.PfDriverMicrocodeVersion, str, |
| 697 | strnlen(str, sizeof(metadata.PfDriverMicrocodeVersion))); |
| 698 | } |
| 699 | |
| 700 | str = json_object_get_string(json_object_object_get( |
| 701 | event_context_data_ir, "pfDriverVersion")); |
| 702 | if (str) { |
| 703 | memcpy(metadata.PfDriverVersion, str, |
| 704 | strnlen(str, sizeof(metadata.PfDriverVersion))); |
| 705 | } |
| 706 | |
| 707 | str = json_object_get_string(json_object_object_get( |
| 708 | event_context_data_ir, "vfDriverVersion")); |
| 709 | if (str) { |
| 710 | memcpy(metadata.VfDriverVersion, str, |
| 711 | strnlen(str, sizeof(metadata.VfDriverVersion))); |
| 712 | } |
| 713 | |
| 714 | // Numeric fields |
| 715 | metadata.Configuration = json_object_get_uint64( |
| 716 | json_object_object_get(event_context_data_ir, "configuration")); |
| 717 | metadata.Pdi = json_object_get_uint64( |
| 718 | json_object_object_get(event_context_data_ir, "pdi")); |
| 719 | metadata.ArchitectureId = json_object_get_int64(json_object_object_get( |
| 720 | event_context_data_ir, "architectureId")); |
| 721 | metadata.HardwareInfoType = json_object_get_int64( |
| 722 | json_object_object_get(event_context_data_ir, |
| 723 | "hardwareInfoType")); |
| 724 | |
| 725 | // PCI Info (if present and HardwareInfoType == 0) |
| 726 | json_object *pci_info = |
| 727 | json_object_object_get(event_context_data_ir, "pciInfo"); |
| 728 | if (pci_info != NULL && metadata.HardwareInfoType == 0) { |
| 729 | metadata.PciInfo.Class = json_object_get_int64( |
| 730 | json_object_object_get(pci_info, "class")); |
| 731 | metadata.PciInfo.Subclass = json_object_get_int64( |
| 732 | json_object_object_get(pci_info, "subclass")); |
| 733 | metadata.PciInfo.Rev = json_object_get_int64( |
| 734 | json_object_object_get(pci_info, "rev")); |
| 735 | metadata.PciInfo.VendorId = json_object_get_int64( |
| 736 | json_object_object_get(pci_info, "vendorId")); |
| 737 | metadata.PciInfo.DeviceId = json_object_get_int64( |
| 738 | json_object_object_get(pci_info, "deviceId")); |
| 739 | metadata.PciInfo.SubsystemVendorId = json_object_get_int64( |
| 740 | json_object_object_get(pci_info, "subsystemVendorId")); |
| 741 | metadata.PciInfo.SubsystemId = json_object_get_int64( |
| 742 | json_object_object_get(pci_info, "subsystemId")); |
| 743 | metadata.PciInfo.Bar0Start = json_object_get_uint64( |
| 744 | json_object_object_get(pci_info, "bar0Start")); |
| 745 | metadata.PciInfo.Bar0Size = json_object_get_uint64( |
| 746 | json_object_object_get(pci_info, "bar0Size")); |
| 747 | metadata.PciInfo.Bar1Start = json_object_get_uint64( |
| 748 | json_object_object_get(pci_info, "bar1Start")); |
| 749 | metadata.PciInfo.Bar1Size = json_object_get_uint64( |
| 750 | json_object_object_get(pci_info, "bar1Size")); |
| 751 | metadata.PciInfo.Bar2Start = json_object_get_uint64( |
| 752 | json_object_object_get(pci_info, "bar2Start")); |
| 753 | metadata.PciInfo.Bar2Size = json_object_get_uint64( |
| 754 | json_object_object_get(pci_info, "bar2Size")); |
| 755 | } |
| 756 | |
| 757 | return fwrite(&metadata, 1, sizeof(EFI_NVIDIA_GPU_CTX_METADATA), |
| 758 | output_file_stream); |
| 759 | } |
| 760 | |
| 761 | // Parses GPU Event Legacy Xid (0x1001) context data to JSON IR. |
| 762 | // Extracts Xid code and message string. |
| 763 | /* |
| 764 | * Example JSON IR "data" output: |
| 765 | * { |
| 766 | * "xidCode": 79, |
| 767 | * "message": "GPU has fallen off the bus" |
| 768 | * } |
| 769 | */ |
| 770 | static void |
| 771 | parse_gpu_ctx_legacy_xid_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 772 | size_t total_event_size, size_t ctx_instance, |
| 773 | json_object *output_data_ir) |
| 774 | { |
| 775 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 776 | event_header, ctx_instance, total_event_size); |
| 777 | if (ctx == NULL) { |
| 778 | return; |
| 779 | } |
| 780 | |
| 781 | EFI_NVIDIA_GPU_CTX_LEGACY_XID *xid = |
| 782 | (EFI_NVIDIA_GPU_CTX_LEGACY_XID *)ctx->Data; |
| 783 | |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 784 | add_int(output_data_ir, "xidCode", xid->XidCode); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 785 | // Use json_object_new_string to stop at first null terminator (no null padding in JSON) |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 786 | add_string(output_data_ir, "message", xid->Message); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 787 | } |
| 788 | |
| 789 | // Converts GPU Event Legacy Xid from JSON IR to binary. |
| 790 | // Returns the number of bytes written. |
| 791 | /* |
| 792 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 793 | * │ EFI_NVIDIA_GPU_CTX_LEGACY_XID (240 bytes) │ |
| 794 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 795 | * │ UINT32 XidCode ← Legacy Xid error code │ |
| 796 | * │ CHAR8 Message[236] ← NUL-terminated ASCII event message │ |
| 797 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 798 | */ |
| 799 | static size_t parse_gpu_ctx_legacy_xid_to_bin(json_object *event_ir, |
| 800 | size_t ctx_instance, |
| 801 | FILE *output_file_stream) |
| 802 | { |
| 803 | json_object *event_context_data_ir = |
| 804 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 805 | if (event_context_data_ir == NULL) { |
| 806 | return 0; |
| 807 | } |
| 808 | |
| 809 | EFI_NVIDIA_GPU_CTX_LEGACY_XID xid = { 0 }; |
| 810 | |
| 811 | xid.XidCode = json_object_get_int64( |
| 812 | json_object_object_get(event_context_data_ir, "xidCode")); |
| 813 | |
| 814 | const char *message = json_object_get_string( |
| 815 | json_object_object_get(event_context_data_ir, "message")); |
| 816 | if (message) { |
| 817 | memcpy(xid.Message, message, |
| 818 | strnlen(message, sizeof(xid.Message))); |
| 819 | } |
| 820 | |
| 821 | return fwrite(&xid, 1, sizeof(EFI_NVIDIA_GPU_CTX_LEGACY_XID), |
| 822 | output_file_stream); |
| 823 | } |
| 824 | |
| 825 | // Parses GPU Recommended Actions (0x1002) context data to JSON IR. |
| 826 | // Extracts flags, recovery action, and diagnostic flow code. |
| 827 | /* |
| 828 | * Example JSON IR "data" output: |
| 829 | * { |
| 830 | * "flags": 3, |
| 831 | * "recoveryAction": 2, |
| 832 | * "diagnosticFlow": 0 |
| 833 | * } |
| 834 | */ |
| 835 | static void parse_gpu_ctx_recommended_actions_to_ir( |
| 836 | EFI_NVIDIA_EVENT_HEADER *event_header, size_t total_event_size, |
| 837 | size_t ctx_instance, json_object *output_data_ir) |
| 838 | { |
| 839 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 840 | event_header, ctx_instance, total_event_size); |
| 841 | if (ctx == NULL) { |
| 842 | return; |
| 843 | } |
| 844 | |
| 845 | EFI_NVIDIA_GPU_CTX_RECOMMENDED_ACTIONS *actions = |
| 846 | (EFI_NVIDIA_GPU_CTX_RECOMMENDED_ACTIONS *)ctx->Data; |
| 847 | |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 848 | add_int(output_data_ir, "flags", actions->Flags); |
| 849 | add_int(output_data_ir, "recoveryAction", actions->RecoveryAction); |
| 850 | add_int(output_data_ir, "diagnosticFlow", actions->DiagnosticFlow); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 851 | } |
| 852 | |
| 853 | // Converts GPU Recommended Actions from JSON IR to binary. |
| 854 | // Returns the number of bytes written. |
| 855 | /* |
| 856 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 857 | * │ EFI_NVIDIA_GPU_CTX_RECOMMENDED_ACTIONS (16 bytes) │ |
| 858 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 859 | * │ UINT8 Flags │ |
| 860 | * │ UINT8 Reserved1[3] ← padding │ |
| 861 | * │ UINT16 RecoveryAction │ |
| 862 | * │ UINT16 DiagnosticFlow ← 0=Unspecified │ |
| 863 | * │ UINT64 Reserved2 ← padding to 16-byte alignment │ |
| 864 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 865 | */ |
| 866 | static size_t parse_gpu_ctx_recommended_actions_to_bin(json_object *event_ir, |
| 867 | size_t ctx_instance, |
| 868 | FILE *output_file_stream) |
| 869 | { |
| 870 | json_object *event_context_data_ir = |
| 871 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 872 | if (event_context_data_ir == NULL) { |
| 873 | return 0; |
| 874 | } |
| 875 | |
| 876 | EFI_NVIDIA_GPU_CTX_RECOMMENDED_ACTIONS actions = { 0 }; |
| 877 | |
| 878 | actions.Flags = json_object_get_int64( |
| 879 | json_object_object_get(event_context_data_ir, "flags")); |
| 880 | actions.RecoveryAction = json_object_get_int64(json_object_object_get( |
| 881 | event_context_data_ir, "recoveryAction")); |
| 882 | actions.DiagnosticFlow = json_object_get_int64(json_object_object_get( |
| 883 | event_context_data_ir, "diagnosticFlow")); |
| 884 | |
| 885 | return fwrite(&actions, 1, |
| 886 | sizeof(EFI_NVIDIA_GPU_CTX_RECOMMENDED_ACTIONS), |
| 887 | output_file_stream); |
| 888 | } |
| 889 | |
| 890 | // Parses event context data type 0: Opaque data. |
| 891 | // Extracts the opaque data from the context data. |
| 892 | /* |
| 893 | * Example JSON IR "data" output: |
| 894 | * { |
| 895 | * "data": "deadbeefcafebabe..." |
| 896 | * } |
| 897 | */ |
| 898 | static void parse_common_ctx_type0_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 899 | size_t total_event_size, |
| 900 | size_t ctx_instance, |
| 901 | json_object *output_data_ir) |
| 902 | { |
| 903 | // Get the nth context |
| 904 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 905 | event_header, ctx_instance, total_event_size); |
| 906 | if (ctx == NULL) { |
| 907 | cper_print_log( |
| 908 | "Error: Failed to get context %zu for opaque data\n", |
| 909 | ctx_instance); |
| 910 | return; |
| 911 | } |
| 912 | |
| 913 | // Verify the context doesn't extend past the event boundary |
| 914 | UINT8 *ctx_start = (UINT8 *)ctx; |
| 915 | UINT8 *event_start = (UINT8 *)event_header; |
| 916 | size_t ctx_offset = ctx_start - event_start; |
| 917 | size_t ctx_total_size = |
| 918 | sizeof(EFI_NVIDIA_EVENT_CTX_HEADER) + ctx->DataSize; |
| 919 | |
| 920 | if (ctx_offset + ctx_total_size > total_event_size) { |
| 921 | cper_print_log( |
| 922 | "Error: Opaque context %zu extends past event boundary\n", |
| 923 | ctx_instance); |
| 924 | return; |
| 925 | } |
| 926 | |
| 927 | // The opaque data starts right after the context header |
| 928 | UINT8 *opaque_data = (UINT8 *)ctx + sizeof(EFI_NVIDIA_EVENT_CTX_HEADER); |
| 929 | UINT32 data_size = ctx->DataSize; |
| 930 | |
| 931 | // Add the hex-encoded opaque data to JSON output |
| 932 | add_bytes_hex(output_data_ir, "data", opaque_data, data_size); |
| 933 | } |
| 934 | // Converts opaque context data from JSON IR to binary. |
| 935 | // Returns the number of bytes written. |
| 936 | /* |
| 937 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 938 | * │ OPAQUE DATA (Context Data Type 0x0000) (variable bytes) │ |
| 939 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 940 | * │ UINT8 Data[] ← Device-specific binary data │ |
| 941 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 942 | */ |
| 943 | static size_t parse_common_ctx_type0_to_bin(json_object *event_ir, |
| 944 | size_t ctx_instance, |
| 945 | FILE *output_file_stream) |
| 946 | { |
| 947 | // Get the context data using the helper function |
| 948 | json_object *event_context_data_ir = |
| 949 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 950 | |
| 951 | if (event_context_data_ir == NULL) { |
| 952 | cper_print_log( |
| 953 | "Error: Failed to get context %zu data for opaque conversion\n", |
| 954 | ctx_instance); |
| 955 | return 0; |
| 956 | } |
| 957 | |
| 958 | // Decode the hex data from the "data" field |
| 959 | size_t decoded_len = 0; |
| 960 | UINT8 *decoded = |
| 961 | get_bytes_hex(event_context_data_ir, "data", &decoded_len); |
| 962 | if (decoded == NULL) { |
| 963 | cper_print_log("Error: hex decode of opaque data failed\n"); |
| 964 | return 0; |
| 965 | } |
| 966 | |
| 967 | // Write the decoded binary data to the output stream |
| 968 | size_t bytes_written = |
| 969 | fwrite(decoded, 1, decoded_len, output_file_stream); |
| 970 | free(decoded); |
| 971 | |
| 972 | return bytes_written; |
| 973 | } |
| 974 | // Parses event context data type 1: 64-bit key/value pairs. |
| 975 | // Extracts an array of UINT64 key-value pairs from the context data. |
| 976 | /* |
| 977 | * Example JSON IR "data" output: |
| 978 | * { |
| 979 | * "keyValArray64": [ |
| 980 | * { "key64": 1234567890123456789, "val64": 9876543210987654321 }, |
| 981 | * { "key64": 5555555555555555555, "val64": 1111111111111111111 } |
| 982 | * ] |
| 983 | * } |
| 984 | */ |
| 985 | static void parse_common_ctx_type1_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 986 | size_t total_event_size, |
| 987 | size_t ctx_instance, |
| 988 | json_object *output_data_ir) |
| 989 | { |
| 990 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 991 | event_header, ctx_instance, total_event_size); |
| 992 | if (ctx == NULL) { |
| 993 | return; |
| 994 | } |
| 995 | |
| 996 | // Verify the context data doesn't extend past the event boundary |
| 997 | UINT8 *event_end = (UINT8 *)event_header + total_event_size; |
| 998 | UINT8 *data_end = (UINT8 *)ctx->Data + ctx->DataSize; |
| 999 | if (data_end > event_end) { |
| 1000 | cper_print_log( |
| 1001 | "Error: Type 1 context %zu extends past event boundary\n", |
| 1002 | ctx_instance); |
| 1003 | return; |
| 1004 | } |
| 1005 | |
| 1006 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1 *data_type1 = |
| 1007 | (EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1 *)ctx->Data; |
| 1008 | UINT8 num_elements = |
| 1009 | ctx->DataSize / sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1); |
| 1010 | |
| 1011 | json_object *kv64arr = json_object_new_array(); |
| 1012 | for (int i = 0; i < num_elements; i++, data_type1++) { |
| 1013 | json_object *kv = NULL; |
| 1014 | kv = json_object_new_object(); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1015 | add_uint(kv, "key64", data_type1->Key); |
| 1016 | add_uint(kv, "val64", data_type1->Value); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1017 | |
| 1018 | json_object_array_add(kv64arr, kv); |
| 1019 | } |
| 1020 | json_object_object_add(output_data_ir, "keyValArray64", kv64arr); |
| 1021 | } |
| 1022 | // Converts event context data type 1 from JSON IR to CPER binary format. |
| 1023 | // Writes an array of 64-bit key/value pairs to the output stream. |
| 1024 | // Returns the total number of bytes written. |
| 1025 | /* |
| 1026 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 1027 | * │ EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1 (Context Data Type 0x0001) (16 bytes) │ |
| 1028 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1029 | * │ UINT64 Key ← 64-bit key │ |
| 1030 | * │ UINT64 Value ← 64-bit value │ |
| 1031 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 1032 | * Note: This structure repeats for each key-value pair in the array |
| 1033 | */ |
| 1034 | static size_t parse_common_ctx_type1_to_bin(json_object *event_ir, |
| 1035 | size_t ctx_instance, |
| 1036 | FILE *output_file_stream) |
| 1037 | { |
| 1038 | json_object *event_context_data_ir = |
| 1039 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 1040 | if (event_context_data_ir == NULL) { |
| 1041 | return 0; |
| 1042 | } |
| 1043 | |
| 1044 | // Get the kv64-array that was created by parse_common_ctx_type1_to_ir |
| 1045 | json_object *kv64arr = |
| 1046 | json_object_object_get(event_context_data_ir, "keyValArray64"); |
| 1047 | if (kv64arr == NULL) { |
| 1048 | return 0; |
| 1049 | } |
| 1050 | |
| 1051 | size_t array_len = json_object_array_length(kv64arr); |
| 1052 | size_t bytes_written = 0; |
| 1053 | |
| 1054 | // Iterate through each key-value pair in the array |
| 1055 | for (size_t i = 0; i < array_len; i++) { |
| 1056 | json_object *kv = json_object_array_get_idx(kv64arr, i); |
| 1057 | if (kv == NULL) { |
| 1058 | continue; |
| 1059 | } |
| 1060 | |
| 1061 | // Create and populate the binary structure |
| 1062 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1 data_type1 = { 0 }; |
| 1063 | data_type1.Key = json_object_get_uint64( |
| 1064 | json_object_object_get(kv, "key64")); |
| 1065 | data_type1.Value = json_object_get_uint64( |
| 1066 | json_object_object_get(kv, "val64")); |
| 1067 | |
| 1068 | // Write to binary file |
| 1069 | bytes_written += |
| 1070 | fwrite(&data_type1, 1, |
| 1071 | sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_1), |
| 1072 | output_file_stream); |
| 1073 | } |
| 1074 | return bytes_written; |
| 1075 | } |
| 1076 | // Parses event context data type 2: 32-bit key/value pairs. |
| 1077 | // Extracts an array of UINT32 key-value pairs from the context data. |
| 1078 | /* |
| 1079 | * Example JSON IR "data" output: |
| 1080 | * { |
| 1081 | * "keyValArray32": [ |
| 1082 | * { "key32": 123456789, "val32": 987654321 }, |
| 1083 | * { "key32": 555555555, "val32": 111111111 } |
| 1084 | * ] |
| 1085 | * } |
| 1086 | */ |
| 1087 | static void parse_common_ctx_type2_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 1088 | size_t total_event_size, |
| 1089 | size_t ctx_instance, |
| 1090 | json_object *output_data_ir) |
| 1091 | { |
| 1092 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 1093 | event_header, ctx_instance, total_event_size); |
| 1094 | if (ctx == NULL) { |
| 1095 | return; |
| 1096 | } |
| 1097 | |
| 1098 | // Verify the context data doesn't extend past the event boundary |
| 1099 | UINT8 *event_end = (UINT8 *)event_header + total_event_size; |
| 1100 | UINT8 *data_end = (UINT8 *)ctx->Data + ctx->DataSize; |
| 1101 | if (data_end > event_end) { |
| 1102 | cper_print_log( |
| 1103 | "Error: Type 2 context %zu extends past event boundary\n", |
| 1104 | ctx_instance); |
| 1105 | return; |
| 1106 | } |
| 1107 | |
| 1108 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2 *data_type2 = |
| 1109 | (EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2 *)ctx->Data; |
| 1110 | UINT8 num_elements = |
| 1111 | ctx->DataSize / sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2); |
| 1112 | |
| 1113 | json_object *kv32arr = json_object_new_array(); |
| 1114 | for (int i = 0; i < num_elements; i++, data_type2++) { |
| 1115 | json_object *kv = NULL; |
| 1116 | kv = json_object_new_object(); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1117 | add_uint(kv, "key32", data_type2->Key); |
| 1118 | add_uint(kv, "val32", data_type2->Value); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1119 | |
| 1120 | json_object_array_add(kv32arr, kv); |
| 1121 | } |
| 1122 | json_object_object_add(output_data_ir, "keyValArray32", kv32arr); |
| 1123 | } |
| 1124 | // Converts event context data type 2 from JSON IR to CPER binary format. |
| 1125 | // Writes an array of 32-bit key/value pairs to the output stream. |
| 1126 | // Returns the total number of bytes written. |
| 1127 | /* |
| 1128 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 1129 | * │ EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2 (Context Data Type 0x0002) (8 bytes) │ |
| 1130 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1131 | * │ UINT32 Key ← 32-bit key │ |
| 1132 | * │ UINT32 Value ← 32-bit value │ |
| 1133 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 1134 | * Note: This structure repeats for each key-value pair in the array |
| 1135 | */ |
| 1136 | static size_t parse_common_ctx_type2_to_bin(json_object *event_ir, |
| 1137 | size_t ctx_instance, |
| 1138 | FILE *output_file_stream) |
| 1139 | { |
| 1140 | json_object *event_context_data_ir = |
| 1141 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 1142 | if (event_context_data_ir == NULL) { |
| 1143 | return 0; |
| 1144 | } |
| 1145 | |
| 1146 | // Get the kv32-array that was created by parse_common_ctx_type2_to_ir |
| 1147 | json_object *kv32arr = |
| 1148 | json_object_object_get(event_context_data_ir, "keyValArray32"); |
| 1149 | if (kv32arr == NULL) { |
| 1150 | return 0; |
| 1151 | } |
| 1152 | |
| 1153 | size_t array_len = json_object_array_length(kv32arr); |
| 1154 | size_t bytes_written = 0; |
| 1155 | |
| 1156 | // Iterate through each key-value pair in the array |
| 1157 | for (size_t i = 0; i < array_len; i++) { |
| 1158 | json_object *kv = json_object_array_get_idx(kv32arr, i); |
| 1159 | if (kv == NULL) { |
| 1160 | continue; |
| 1161 | } |
| 1162 | |
| 1163 | // Create and populate the binary structure |
| 1164 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2 data_type2 = { 0 }; |
| 1165 | data_type2.Key = json_object_get_uint64( |
| 1166 | json_object_object_get(kv, "key32")); |
| 1167 | data_type2.Value = json_object_get_uint64( |
| 1168 | json_object_object_get(kv, "val32")); |
| 1169 | |
| 1170 | // Write to binary file |
| 1171 | bytes_written += |
| 1172 | fwrite(&data_type2, 1, |
| 1173 | sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_2), |
| 1174 | output_file_stream); |
| 1175 | } |
| 1176 | return bytes_written; |
| 1177 | } |
| 1178 | // Parses event context data type 3: 64-bit values only. |
| 1179 | // Extracts an array of UINT64 values (no keys) from the context data. |
| 1180 | /* |
| 1181 | * Example JSON IR "data" output: |
| 1182 | * { |
| 1183 | * "valArray64": [ |
| 1184 | * { "val64": 1234567890123456789 }, |
| 1185 | * { "val64": 9876543210987654321 } |
| 1186 | * ] |
| 1187 | * } |
| 1188 | */ |
| 1189 | static void parse_common_ctx_type3_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 1190 | size_t total_event_size, |
| 1191 | size_t ctx_instance, |
| 1192 | json_object *output_data_ir) |
| 1193 | { |
| 1194 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 1195 | event_header, ctx_instance, total_event_size); |
| 1196 | if (ctx == NULL) { |
| 1197 | return; |
| 1198 | } |
| 1199 | |
| 1200 | // Verify the context data doesn't extend past the event boundary |
| 1201 | UINT8 *event_end = (UINT8 *)event_header + total_event_size; |
| 1202 | UINT8 *data_end = (UINT8 *)ctx->Data + ctx->DataSize; |
| 1203 | if (data_end > event_end) { |
| 1204 | cper_print_log( |
| 1205 | "Error: Type 3 context %zu extends past event boundary\n", |
| 1206 | ctx_instance); |
| 1207 | return; |
| 1208 | } |
| 1209 | |
| 1210 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3 *data_type3 = |
| 1211 | (EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3 *)ctx->Data; |
| 1212 | UINT8 num_elements = |
| 1213 | ctx->DataSize / sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3); |
| 1214 | |
| 1215 | json_object *val64arr = json_object_new_array(); |
| 1216 | for (int i = 0; i < num_elements; i++, data_type3++) { |
| 1217 | json_object *v = NULL; |
| 1218 | v = json_object_new_object(); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1219 | add_uint(v, "val64", data_type3->Value); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1220 | |
| 1221 | json_object_array_add(val64arr, v); |
| 1222 | } |
| 1223 | json_object_object_add(output_data_ir, "valArray64", val64arr); |
| 1224 | } |
| 1225 | // Converts event context data type 3 from JSON IR to CPER binary format. |
| 1226 | // Writes an array of 64-bit values (no keys) to the output stream. |
| 1227 | // Returns the total number of bytes written. |
| 1228 | /* |
| 1229 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 1230 | * │ EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3 (Context Data Type 0x0003) (8 bytes) │ |
| 1231 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1232 | * │ UINT64 Value ← 64-bit value (no key) │ |
| 1233 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 1234 | * Note: This structure repeats for each value in the array |
| 1235 | */ |
| 1236 | static size_t parse_common_ctx_type3_to_bin(json_object *event_ir, |
| 1237 | size_t ctx_instance, |
| 1238 | FILE *output_file_stream) |
| 1239 | { |
| 1240 | json_object *event_context_data_ir = |
| 1241 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 1242 | if (event_context_data_ir == NULL) { |
| 1243 | return 0; |
| 1244 | } |
| 1245 | |
| 1246 | // Get the v64-array that was created by parse_common_ctx_type3_to_ir |
| 1247 | json_object *v64arr = |
| 1248 | json_object_object_get(event_context_data_ir, "valArray64"); |
| 1249 | if (v64arr == NULL) { |
| 1250 | return 0; |
| 1251 | } |
| 1252 | |
| 1253 | size_t array_len = json_object_array_length(v64arr); |
| 1254 | size_t bytes_written = 0; |
| 1255 | |
| 1256 | // Iterate through each key-value pair in the array |
| 1257 | for (size_t i = 0; i < array_len; i++) { |
| 1258 | json_object *v = json_object_array_get_idx(v64arr, i); |
| 1259 | if (v == NULL) { |
| 1260 | continue; |
| 1261 | } |
| 1262 | |
| 1263 | // Create and populate the binary structure |
| 1264 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3 data_type3 = { 0 }; |
| 1265 | data_type3.Value = json_object_get_uint64( |
| 1266 | json_object_object_get(v, "val64")); |
| 1267 | |
| 1268 | // Write to binary file |
| 1269 | bytes_written += |
| 1270 | fwrite(&data_type3, 1, |
| 1271 | sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_3), |
| 1272 | output_file_stream); |
| 1273 | } |
| 1274 | return bytes_written; |
| 1275 | } |
| 1276 | // Parses event context data type 4: 32-bit values only. |
| 1277 | // Extracts an array of UINT32 values (no keys) from the context data. |
| 1278 | /* |
| 1279 | * Example JSON IR "data" output: |
| 1280 | * { |
| 1281 | * "valArray32": [ |
| 1282 | * { "val32": 123456789 }, |
| 1283 | * { "val32": 987654321 } |
| 1284 | * ] |
| 1285 | * } |
| 1286 | */ |
| 1287 | static void parse_common_ctx_type4_to_ir(EFI_NVIDIA_EVENT_HEADER *event_header, |
| 1288 | size_t total_event_size, |
| 1289 | size_t ctx_instance, |
| 1290 | json_object *output_data_ir) |
| 1291 | { |
| 1292 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = get_event_context_n( |
| 1293 | event_header, ctx_instance, total_event_size); |
| 1294 | if (ctx == NULL) { |
| 1295 | return; |
| 1296 | } |
| 1297 | |
| 1298 | // Verify the context data doesn't extend past the event boundary |
| 1299 | UINT8 *event_end = (UINT8 *)event_header + total_event_size; |
| 1300 | UINT8 *data_end = (UINT8 *)ctx->Data + ctx->DataSize; |
| 1301 | if (data_end > event_end) { |
| 1302 | cper_print_log( |
| 1303 | "Error: Type 4 context %zu extends past event boundary\n", |
| 1304 | ctx_instance); |
| 1305 | return; |
| 1306 | } |
| 1307 | |
| 1308 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4 *data_type4 = |
| 1309 | (EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4 *)ctx->Data; |
| 1310 | UINT8 num_elements = |
| 1311 | ctx->DataSize / sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4); |
| 1312 | |
| 1313 | json_object *val32arr = json_object_new_array(); |
| 1314 | for (int i = 0; i < num_elements; i++, data_type4++) { |
| 1315 | json_object *v = NULL; |
| 1316 | v = json_object_new_object(); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1317 | add_uint(v, "val32", data_type4->Value); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1318 | |
| 1319 | json_object_array_add(val32arr, v); |
| 1320 | } |
| 1321 | json_object_object_add(output_data_ir, "valArray32", val32arr); |
| 1322 | } |
| 1323 | // Converts event context data type 4 from JSON IR to CPER binary format. |
| 1324 | // Writes an array of 32-bit values (no keys) to the output stream. |
| 1325 | // Returns the total number of bytes written. |
| 1326 | /* |
| 1327 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 1328 | * │ EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4 (Context Data Type 0x0004) (4 bytes) │ |
| 1329 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1330 | * │ UINT32 Value ← 32-bit value (no key) │ |
| 1331 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 1332 | * Note: This structure repeats for each value in the array |
| 1333 | */ |
| 1334 | static size_t parse_common_ctx_type4_to_bin(json_object *event_ir, |
| 1335 | size_t ctx_instance, |
| 1336 | FILE *output_file_stream) |
| 1337 | { |
| 1338 | json_object *event_context_data_ir = |
| 1339 | get_event_context_n_data_ir(event_ir, ctx_instance); |
| 1340 | if (event_context_data_ir == NULL) { |
| 1341 | return 0; |
| 1342 | } |
| 1343 | |
| 1344 | // Get the v32-array that was created by parse_common_ctx_type4_to_ir |
| 1345 | json_object *v32arr = |
| 1346 | json_object_object_get(event_context_data_ir, "valArray32"); |
| 1347 | if (v32arr == NULL) { |
| 1348 | return 0; |
| 1349 | } |
| 1350 | |
| 1351 | size_t array_len = json_object_array_length(v32arr); |
| 1352 | size_t bytes_written = 0; |
| 1353 | |
| 1354 | // Iterate through each key-value pair in the array |
| 1355 | for (size_t i = 0; i < array_len; i++) { |
| 1356 | json_object *v = json_object_array_get_idx(v32arr, i); |
| 1357 | if (v == NULL) { |
| 1358 | continue; |
| 1359 | } |
| 1360 | |
| 1361 | // Create and populate the binary structure |
| 1362 | EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4 data_type4 = { 0 }; |
| 1363 | data_type4.Value = json_object_get_uint64( |
| 1364 | json_object_object_get(v, "val32")); |
| 1365 | |
| 1366 | // Write to binary file |
| 1367 | bytes_written += |
| 1368 | fwrite(&data_type4, 1, |
| 1369 | sizeof(EFI_NVIDIA_EVENT_CTX_DATA_TYPE_4), |
| 1370 | output_file_stream); |
| 1371 | } |
| 1372 | return bytes_written; |
| 1373 | } |
| 1374 | // Converts a single NVIDIA event-based CPER section into JSON IR format. |
| 1375 | // Parses the event header, device-specific event info, and all event contexts. |
| 1376 | // Supports custom handlers for specific device types and data formats. |
| 1377 | /* |
| 1378 | * Example JSON IR output for a CPU device with Type 1 context: |
| 1379 | * { |
| 1380 | * "eventHeader": { |
| 1381 | * "signature": "CPU-FAULT", |
| 1382 | * "version": 1, |
| 1383 | * "sourceDeviceType": 0, |
| 1384 | * "type": 100, |
| 1385 | * "subtype": 200, |
| 1386 | * "linkId": 0 |
| 1387 | * }, |
| 1388 | * "eventInfo": { |
| 1389 | * "version": 0, |
| 1390 | * "SocketNum": 0, |
| 1391 | * "Architecture": 2684420096, |
| 1392 | * "Ecid1": 1234567890123456789, |
| 1393 | * "Ecid2": 9876543210987654321, |
| 1394 | * "Ecid3": 5555555555555555555, |
| 1395 | * "Ecid4": 1111111111111111111, |
| 1396 | * "InstanceBase": 281474976710656 |
| 1397 | * }, |
| Daniel Osawa | 5beecea | 2026-02-06 10:30:32 -0800 | [diff] [blame] | 1398 | * "eventContexts": [ |
| 1399 | * { |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1400 | * "version": 0, |
| 1401 | * "dataFormatType": 1, |
| 1402 | * "dataFormatVersion": 0, |
| 1403 | * "dataSize": 32, |
| 1404 | * "data": { |
| 1405 | * "keyValArray64": [ |
| 1406 | * { "key64": 1234567890123456789, "val64": 9876543210987654321 } |
| 1407 | * ] |
| 1408 | * } |
| 1409 | * } |
| Daniel Osawa | 5beecea | 2026-02-06 10:30:32 -0800 | [diff] [blame] | 1410 | * ] |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1411 | * } |
| 1412 | */ |
| 1413 | json_object *cper_section_nvidia_events_to_ir(const UINT8 *section, UINT32 size, |
| 1414 | char **desc_string) |
| 1415 | { |
| 1416 | EFI_NVIDIA_EVENT_HEADER *event_header = |
| 1417 | (EFI_NVIDIA_EVENT_HEADER *)section; |
| 1418 | // Check event header version compatibility |
| 1419 | if (!check_event_header_version(event_header->EventVersion, |
| 1420 | EFI_NVIDIA_EVENT_HEADER_VERSION, |
| 1421 | "parsing")) { |
| 1422 | return NULL; |
| 1423 | } |
| 1424 | |
| 1425 | json_object *event_ir = json_object_new_object(); |
| 1426 | |
| 1427 | // Parse event header fields |
| 1428 | json_object *event_header_ir = json_object_new_object(); |
| 1429 | json_object_object_add(event_ir, "eventHeader", event_header_ir); |
| 1430 | *desc_string = malloc(SECTION_DESC_STRING_SIZE); |
| 1431 | if (*desc_string == NULL) { |
| 1432 | cper_print_log( |
| 1433 | "Error: Failed to allocate memory for description string\n"); |
| 1434 | json_object_put(event_ir); |
| 1435 | return NULL; |
| 1436 | } |
| 1437 | int outstr_len = 0; |
| 1438 | const char *signature = event_header->Signature; |
| 1439 | int sig_len = cper_printable_string_length( |
| 1440 | event_header->Signature, sizeof(event_header->Signature)); |
| 1441 | if (sig_len <= 0) { |
| 1442 | signature = ""; |
| 1443 | sig_len = 0; |
| 1444 | } |
| 1445 | outstr_len = snprintf(*desc_string, SECTION_DESC_STRING_SIZE, |
| 1446 | "A %.*s Nvidia Event occurred", sig_len, |
| 1447 | signature); |
| 1448 | if (outstr_len < 0) { |
| 1449 | cper_print_log( |
| 1450 | "Error: Could not write to description string\n"); |
| 1451 | } else if (outstr_len > SECTION_DESC_STRING_SIZE) { |
| 1452 | cper_print_log("Error: Description string truncated: %s\n", |
| 1453 | *desc_string); |
| 1454 | } |
| 1455 | add_untrusted_string(event_header_ir, "signature", signature, 16); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1456 | add_int(event_header_ir, "version", event_header->EventVersion); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1457 | static const char *sourceDeviceType[2] = { "CPU", "GPU" }; |
| 1458 | add_dict(event_header_ir, "sourceDeviceType", |
| 1459 | event_header->SourceDeviceType, sourceDeviceType, |
| 1460 | sizeof(sourceDeviceType) / sizeof(sourceDeviceType[0])); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1461 | add_int(event_header_ir, "type", event_header->EventType); |
| 1462 | add_int(event_header_ir, "subtype", event_header->EventSubtype); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1463 | if (event_header->EventLinkId != 0) { |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1464 | add_uint(event_header_ir, "linkId", event_header->EventLinkId); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1465 | } |
| 1466 | |
| 1467 | // Parse event info structure |
| 1468 | EFI_NVIDIA_EVENT_INFO_HEADER *event_info_header = |
| 1469 | get_event_info_header(event_header); |
| 1470 | json_object *event_info_ir = json_object_new_object(); |
| 1471 | json_object_object_add(event_ir, "eventInfo", event_info_ir); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1472 | add_int(event_info_ir, "version", event_info_header->InfoVersion); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1473 | |
| 1474 | // Extract major and minor version from event info header |
| 1475 | UINT8 info_minor = get_info_minor_version(event_info_header); |
| 1476 | UINT8 info_major = get_info_major_version(event_info_header); |
| 1477 | |
| 1478 | // Call device-specific handler to parse additional event info fields |
| 1479 | for (size_t i = 0; |
| 1480 | i < sizeof(nv_event_types) / sizeof(nv_event_types[0]); i++) { |
| 1481 | if ((NVIDIA_EVENT_SRC_DEV)event_header->SourceDeviceType == |
| 1482 | nv_event_types[i].srcDev) { |
| 1483 | // Check version compatibility |
| 1484 | if (!check_info_major_version( |
| 1485 | info_major, info_minor, |
| 1486 | nv_event_types[i].major_version, |
| 1487 | "parsing")) { |
| 1488 | break; |
| 1489 | } |
| 1490 | nv_event_types[i].callback(event_header, event_info_ir); |
| 1491 | break; |
| 1492 | } |
| 1493 | } |
| 1494 | // Parse all event contexts into an array |
| 1495 | json_object *event_contexts_ir = json_object_new_array(); |
| 1496 | json_object_object_add(event_ir, "eventContexts", event_contexts_ir); |
| 1497 | |
| 1498 | for (size_t i = 0; i < (size_t)event_header->EventContextCount; i++) { |
| 1499 | EFI_NVIDIA_EVENT_CTX_HEADER *ctx = |
| 1500 | get_event_context_n(event_header, i, size); |
| 1501 | if (ctx == NULL) { |
| 1502 | continue; |
| 1503 | } |
| 1504 | // Parse common context header fields |
| 1505 | json_object *event_context_ir = json_object_new_object(); |
| 1506 | // Add context to array |
| 1507 | json_object_array_add(event_contexts_ir, event_context_ir); |
| Ed Tanous | 6c5d2f3 | 2026-02-02 15:18:15 -0800 | [diff] [blame] | 1508 | add_int(event_context_ir, "version", ctx->CtxVersion); |
| 1509 | add_int(event_context_ir, "dataFormatType", |
| 1510 | ctx->DataFormatType); |
| 1511 | add_int(event_context_ir, "dataFormatVersion", |
| 1512 | ctx->DataFormatVersion); |
| 1513 | add_int(event_context_ir, "dataSize", ctx->DataSize); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1514 | json_object *data_ir = json_object_new_object(); |
| 1515 | json_object_object_add(event_context_ir, "data", data_ir); |
| 1516 | // Check for device/format-specific custom handler |
| 1517 | bool handler_override_found = false; |
| 1518 | for (size_t handler_idx = 0; |
| 1519 | handler_idx < |
| 1520 | sizeof(event_ctx_handlers) / sizeof(event_ctx_handlers[0]); |
| 1521 | handler_idx++) { |
| 1522 | if (event_ctx_handlers[handler_idx].srcDev == |
| 1523 | (NVIDIA_EVENT_SRC_DEV) |
| 1524 | event_header->SourceDeviceType && |
| 1525 | event_ctx_handlers[handler_idx].dataFormatType == |
| 1526 | ctx->DataFormatType) { |
| 1527 | if (event_ctx_handlers[handler_idx].callback != |
| 1528 | NULL) { |
| 1529 | event_ctx_handlers[handler_idx].callback( |
| 1530 | event_header, size, i, data_ir); |
| 1531 | handler_override_found = true; |
| 1532 | break; |
| 1533 | } |
| 1534 | } |
| 1535 | } |
| 1536 | if (handler_override_found) { |
| 1537 | continue; |
| 1538 | } |
| 1539 | // Use default parser based on data format type |
| 1540 | switch (ctx->DataFormatType) { |
| 1541 | case TYPE_1: |
| 1542 | parse_common_ctx_type1_to_ir(event_header, size, i, |
| 1543 | data_ir); |
| 1544 | break; |
| 1545 | case TYPE_2: |
| 1546 | parse_common_ctx_type2_to_ir(event_header, size, i, |
| 1547 | data_ir); |
| 1548 | break; |
| 1549 | case TYPE_3: |
| 1550 | parse_common_ctx_type3_to_ir(event_header, size, i, |
| 1551 | data_ir); |
| 1552 | break; |
| 1553 | case TYPE_4: |
| 1554 | parse_common_ctx_type4_to_ir(event_header, size, i, |
| 1555 | data_ir); |
| 1556 | break; |
| 1557 | default: |
| 1558 | parse_common_ctx_type0_to_ir(event_header, size, i, |
| 1559 | data_ir); |
| 1560 | break; |
| 1561 | } |
| 1562 | } |
| 1563 | return event_ir; |
| 1564 | } |
| 1565 | // Converts a single NVIDIA event JSON IR structure back into CPER binary format. |
| 1566 | // Writes the event header, device-specific event info, and all event contexts to binary. |
| 1567 | // Handles 16-byte alignment padding as required by the CPER specification. |
| 1568 | /* |
| 1569 | * Binary output structure (NVIDIA Event-based CPER): |
| 1570 | * ┌─────────────────────────────────────────────────────────────────────────┐ |
| 1571 | * │ EFI_NVIDIA_EVENT_HEADER (32 bytes) │ |
| 1572 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1573 | * │ EFI_NVIDIA_EVENT_INFO_HEADER (3 bytes) │ |
| 1574 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1575 | * │ Device-Specific Event Info (variable size) │ |
| 1576 | * │ e.g., EFI_NVIDIA_CPU_EVENT_INFO (32 bytes) │ |
| 1577 | * │ or EFI_NVIDIA_GPU_EVENT_INFO (16 bytes) │ |
| 1578 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1579 | * │ PADDING (if needed) (align to 16-byte boundary) │ |
| 1580 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1581 | * │ EFI_NVIDIA_EVENT_CTX_HEADER (Context 0) (16 bytes) │ |
| 1582 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1583 | * │ Context Data (Type-specific) (variable size) │ |
| 1584 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1585 | * │ PADDING (if needed) (align to 16-byte boundary) │ |
| 1586 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1587 | * │ EFI_NVIDIA_EVENT_CTX_HEADER (Context N) (16 bytes) │ |
| 1588 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1589 | * │ Context Data (Type-specific) (variable size) │ |
| 1590 | * ├─────────────────────────────────────────────────────────────────────────┤ |
| 1591 | * │ PADDING (if needed) (align to 16-byte boundary) │ |
| 1592 | * └─────────────────────────────────────────────────────────────────────────┘ |
| 1593 | */ |
| 1594 | void ir_section_nvidia_events_to_cper(json_object *section, FILE *out) |
| 1595 | { |
| 1596 | json_object *event_header_ir = |
| 1597 | json_object_object_get(section, "eventHeader"); |
| 1598 | EFI_NVIDIA_EVENT_HEADER event_header = { 0 }; |
| 1599 | event_header.EventVersion = json_object_get_int64( |
| 1600 | json_object_object_get(event_header_ir, "version")); |
| 1601 | // Check event header version compatibility |
| 1602 | if (!check_event_header_version(event_header.EventVersion, |
| 1603 | EFI_NVIDIA_EVENT_HEADER_VERSION, |
| 1604 | "generation")) { |
| 1605 | return; |
| 1606 | } |
| 1607 | json_object *sourceDeviceType_obj; |
| 1608 | if (json_object_object_get_ex(event_header_ir, "sourceDeviceType", |
| 1609 | &sourceDeviceType_obj)) { |
| 1610 | json_object *raw_obj; |
| 1611 | if (json_object_object_get_ex(sourceDeviceType_obj, "raw", |
| 1612 | &raw_obj)) { |
| 1613 | event_header.SourceDeviceType = |
| 1614 | json_object_get_uint64(raw_obj); |
| 1615 | } |
| 1616 | } |
| 1617 | |
| 1618 | event_header.Reserved1 = 0; |
| 1619 | event_header.EventType = json_object_get_int64( |
| 1620 | json_object_object_get(event_header_ir, "type")); |
| 1621 | event_header.EventSubtype = json_object_get_int64( |
| 1622 | json_object_object_get(event_header_ir, "subtype")); |
| 1623 | event_header.EventLinkId = json_object_get_uint64( |
| 1624 | json_object_object_get(event_header_ir, "linkId")); |
| 1625 | |
| 1626 | // Signature is optional - only copy if present |
| 1627 | json_object *signature_obj = |
| 1628 | json_object_object_get(event_header_ir, "signature"); |
| 1629 | if (signature_obj != NULL) { |
| 1630 | const char *sig_str = json_object_get_string(signature_obj); |
| 1631 | if (sig_str != NULL) { |
| 1632 | // Copy up to 16 bytes, don't force null termination |
| 1633 | // (signature can be exactly 16 chars with no null terminator) |
| 1634 | size_t sig_len = strlen(sig_str); |
| 1635 | size_t copy_len = |
| 1636 | sig_len < sizeof(event_header.Signature) ? |
| 1637 | sig_len : |
| 1638 | sizeof(event_header.Signature); |
| 1639 | memcpy(event_header.Signature, sig_str, copy_len); |
| 1640 | // Only null-terminate if there's room |
| 1641 | if (sig_len < sizeof(event_header.Signature)) { |
| 1642 | event_header.Signature[sig_len] = '\0'; |
| 1643 | } |
| 1644 | } |
| 1645 | } |
| 1646 | |
| Daniel Osawa | 5beecea | 2026-02-06 10:30:32 -0800 | [diff] [blame] | 1647 | // Get event contexts and count them before writing header |
| 1648 | // (EventContextCount must be set before fwrite) |
| 1649 | json_object *event_contexts_ir = |
| 1650 | json_object_object_get(section, "eventContexts"); |
| 1651 | size_t ctx_count = 0; |
| 1652 | if (event_contexts_ir != NULL && |
| 1653 | json_object_is_type(event_contexts_ir, json_type_array)) { |
| 1654 | ctx_count = json_object_array_length(event_contexts_ir); |
| 1655 | event_header.EventContextCount = ctx_count; |
| 1656 | } |
| 1657 | |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1658 | fwrite(&event_header, sizeof(EFI_NVIDIA_EVENT_HEADER), 1, out); |
| 1659 | |
| 1660 | json_object *event_info_ir = |
| 1661 | json_object_object_get(section, "eventInfo"); |
| 1662 | EFI_NVIDIA_EVENT_INFO_HEADER event_info_header = { 0 }; |
| 1663 | event_info_header.InfoVersion = json_object_get_int64( |
| 1664 | json_object_object_get(event_info_ir, "version")); |
| 1665 | |
| 1666 | NV_EVENT_INFO_CALLBACKS *nv_event_info_callback = NULL; |
| 1667 | // Extract major and minor version from event info header |
| 1668 | UINT8 info_minor = get_info_minor_version(&event_info_header); |
| 1669 | UINT8 info_major = get_info_major_version(&event_info_header); |
| 1670 | for (size_t i = 0; |
| 1671 | i < sizeof(nv_event_types) / sizeof(nv_event_types[0]); i++) { |
| 1672 | NV_EVENT_INFO_CALLBACKS *callback = &nv_event_types[i]; |
| 1673 | NVIDIA_EVENT_SRC_DEV srcDev = |
| 1674 | (NVIDIA_EVENT_SRC_DEV)event_header.SourceDeviceType; |
| 1675 | if (srcDev != callback->srcDev) { |
| 1676 | continue; |
| 1677 | } |
| 1678 | // Check version compatibility |
| 1679 | if (!check_info_major_version(info_major, info_minor, |
| 1680 | callback->major_version, |
| 1681 | "generation")) { |
| 1682 | break; |
| 1683 | } |
| 1684 | nv_event_info_callback = callback; |
| 1685 | break; |
| 1686 | } |
| 1687 | if (nv_event_info_callback == NULL) { |
| 1688 | return; |
| 1689 | } |
| 1690 | |
| 1691 | event_info_header.InfoSize = sizeof(EFI_NVIDIA_EVENT_INFO_HEADER) + |
| 1692 | nv_event_info_callback->info_size; |
| 1693 | |
| 1694 | size_t bytes_written = fwrite(&event_info_header, 1, |
| 1695 | sizeof(EFI_NVIDIA_EVENT_INFO_HEADER), |
| 1696 | out); |
| 1697 | // Call device-specific handler to parse additional event info fields |
| 1698 | bytes_written += |
| 1699 | nv_event_info_callback->callback_bin(event_info_ir, out); |
| 1700 | |
| 1701 | write_padding_to_16_byte_alignment(bytes_written, out); |
| 1702 | |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1703 | // Check if eventContexts field exists before iterating |
| 1704 | if (event_contexts_ir == NULL) { |
| 1705 | cper_print_log( |
| 1706 | "Warning: Missing eventContexts field in Nvidia Event JSON\n"); |
| 1707 | return; |
| 1708 | } |
| 1709 | |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1710 | for (size_t ctx_instance = 0; ctx_instance < ctx_count; |
| 1711 | ctx_instance++) { |
| Daniel Osawa | 5beecea | 2026-02-06 10:30:32 -0800 | [diff] [blame] | 1712 | json_object *value = json_object_array_get_idx( |
| 1713 | event_contexts_ir, ctx_instance); |
| Daniel Osawa | 51c1813 | 2025-11-26 09:21:20 -0800 | [diff] [blame] | 1714 | if (value == NULL) { |
| 1715 | continue; |
| 1716 | } |
| 1717 | |
| 1718 | EFI_NVIDIA_EVENT_CTX_HEADER ctx = { 0 }; |
| 1719 | ctx.CtxVersion = (uint16_t)json_object_get_int64( |
| 1720 | json_object_object_get(value, "version")); |
| 1721 | ctx.DataFormatType = (uint16_t)json_object_get_int64( |
| 1722 | json_object_object_get(value, "dataFormatType")); |
| 1723 | ctx.DataFormatVersion = (uint16_t)json_object_get_int64( |
| 1724 | json_object_object_get(value, "dataFormatVersion")); |
| 1725 | ctx.DataSize = json_object_get_int( |
| 1726 | json_object_object_get(value, "dataSize")); |
| 1727 | bytes_written = fwrite( |
| 1728 | &ctx, 1, sizeof(EFI_NVIDIA_EVENT_CTX_HEADER), out); |
| 1729 | |
| 1730 | // Check for device/format-specific custom handler |
| 1731 | bool handler_override_found = false; |
| 1732 | for (size_t j = 0; j < sizeof(event_ctx_handlers) / |
| 1733 | sizeof(event_ctx_handlers[0]); |
| 1734 | j++) { |
| 1735 | if (event_ctx_handlers[j].srcDev == |
| 1736 | (NVIDIA_EVENT_SRC_DEV) |
| 1737 | event_header.SourceDeviceType && |
| 1738 | event_ctx_handlers[j].dataFormatType == |
| 1739 | ctx.DataFormatType) { |
| 1740 | bytes_written += |
| 1741 | event_ctx_handlers[j].callback_bin( |
| 1742 | section, ctx_instance, out); |
| 1743 | handler_override_found = true; |
| 1744 | break; |
| 1745 | } |
| 1746 | } |
| 1747 | // If no handler override found, use default parser based on data format type |
| 1748 | if (!handler_override_found) { |
| 1749 | switch (ctx.DataFormatType) { |
| 1750 | case TYPE_1: |
| 1751 | bytes_written += parse_common_ctx_type1_to_bin( |
| 1752 | section, ctx_instance, out); |
| 1753 | break; |
| 1754 | case TYPE_2: |
| 1755 | bytes_written += parse_common_ctx_type2_to_bin( |
| 1756 | section, ctx_instance, out); |
| 1757 | break; |
| 1758 | case TYPE_3: |
| 1759 | bytes_written += parse_common_ctx_type3_to_bin( |
| 1760 | section, ctx_instance, out); |
| 1761 | break; |
| 1762 | case TYPE_4: |
| 1763 | bytes_written += parse_common_ctx_type4_to_bin( |
| 1764 | section, ctx_instance, out); |
| 1765 | break; |
| 1766 | default: |
| 1767 | bytes_written += parse_common_ctx_type0_to_bin( |
| 1768 | section, ctx_instance, out); |
| 1769 | break; |
| 1770 | } |
| 1771 | } |
| 1772 | write_padding_to_16_byte_alignment(bytes_written, out); |
| 1773 | } |
| 1774 | } |