TensorRT Provider Options.
Options used to configure the TensorRT execution provider; passed to OrtApi::SessionOptionsAppendExecutionProvider_TensorRT.
#include <onnxruntime_c_api.h>
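The struct is typically zero-initialized, selectively populated, and handed to OrtApi::SessionOptionsAppendExecutionProvider_TensorRT before the session is created. A minimal sketch is shown below; status checks are omitted, and the model path and option values are placeholders, not recommendations.

```c
#include <onnxruntime_c_api.h>

int main(void) {
  const OrtApi* ort = OrtGetApiBase()->GetApi(ORT_API_VERSION);

  OrtEnv* env = NULL;
  ort->CreateEnv(ORT_LOGGING_LEVEL_WARNING, "trt_example", &env);

  OrtSessionOptions* session_options = NULL;
  ort->CreateSessionOptions(&session_options);

  /* Zero-initialize, then set only the options of interest. */
  OrtTensorRTProviderOptions trt_options = {0};
  trt_options.device_id = 0;                        /* run on CUDA device 0 */
  trt_options.trt_max_partition_iterations = 1000;  /* illustrative values */
  trt_options.trt_min_subgraph_size = 1;
  trt_options.trt_max_workspace_size = 1073741824;  /* 1 GiB workspace */
  trt_options.trt_fp16_enable = 1;                  /* allow FP16 kernels */

  ort->SessionOptionsAppendExecutionProvider_TensorRT(session_options, &trt_options);

  OrtSession* session = NULL;
  ort->CreateSession(env, "model.onnx", session_options, &session);  /* placeholder model path */

  /* ... run inference ... */

  ort->ReleaseSession(session);
  ort->ReleaseSessionOptions(session_options);
  ort->ReleaseEnv(env);
  return 0;
}
```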
◆ device_id
| int OrtTensorRTProviderOptions::device_id |
CUDA device id (0 = default device)
◆ has_user_compute_stream
| int OrtTensorRTProviderOptions::has_user_compute_stream |
Indicator of a user-specified CUDA compute stream (see user_compute_stream)
◆ trt_dla_core
| int OrtTensorRTProviderOptions::trt_dla_core |
DLA core number (default 0)
◆ trt_dla_enable
| int OrtTensorRTProviderOptions::trt_dla_enable |
Enable DLA (default 0 = false, nonzero = true)
◆ trt_dump_subgraphs
| int OrtTensorRTProviderOptions::trt_dump_subgraphs |
Dump the subgraphs assigned to TensorRT (default 0 = false, nonzero = true)
◆ trt_engine_cache_enable
| int OrtTensorRTProviderOptions::trt_engine_cache_enable |
Enable TensorRT engine caching (default 0 = false, nonzero = true)
◆ trt_engine_cache_path
| const char* OrtTensorRTProviderOptions::trt_engine_cache_path |
Path where cached TensorRT engines are stored
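Building on the sketch above, engine caching would presumably be enabled by setting both fields before appending the provider; the directory name is only an illustration.

```c
/* Reuse serialized TensorRT engines between runs instead of rebuilding them. */
trt_options.trt_engine_cache_enable = 1;
trt_options.trt_engine_cache_path = "./trt_engine_cache";  /* illustrative directory */
```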
◆ trt_engine_decryption_enable
| int OrtTensorRTProviderOptions::trt_engine_decryption_enable |
Enable TensorRT engine decryption (default 0 = false, nonzero = true)
◆ trt_engine_decryption_lib_path
| const char* OrtTensorRTProviderOptions::trt_engine_decryption_lib_path |
Path to the engine decryption library
◆ trt_force_sequential_engine_build
| int OrtTensorRTProviderOptions::trt_force_sequential_engine_build |
Force TensorRT engines to be built sequentially (default 0 = false, nonzero = true)
◆ trt_fp16_enable
| int OrtTensorRTProviderOptions::trt_fp16_enable |
Enable TensorRT FP16 precision (default 0 = false, nonzero = true)
◆ trt_int8_calibration_table_name
| const char* OrtTensorRTProviderOptions::trt_int8_calibration_table_name |
TensorRT INT8 calibration table name
◆ trt_int8_enable
| int OrtTensorRTProviderOptions::trt_int8_enable |
Enable TensorRT INT8 precision (default 0 = false, nonzero = true)
◆ trt_int8_use_native_calibration_table
| int OrtTensorRTProviderOptions::trt_int8_use_native_calibration_table |
Use the native TensorRT-generated calibration table instead of an ONNX Runtime-generated one (default 0 = false, nonzero = true)
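Taken together, the INT8 fields might be configured as in the fragment below; the calibration table name is a placeholder for a file produced by a prior calibration step.

```c
/* Enable INT8 precision using a previously generated calibration table. */
trt_options.trt_int8_enable = 1;
trt_options.trt_int8_calibration_table_name = "calibration_table";  /* placeholder name */
trt_options.trt_int8_use_native_calibration_table = 0;              /* use the ORT-generated table */
```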
◆ trt_max_partition_iterations
| int OrtTensorRTProviderOptions::trt_max_partition_iterations |
Maximum number of iterations used when partitioning the model for TensorRT
◆ trt_max_workspace_size
| size_t OrtTensorRTProviderOptions::trt_max_workspace_size |
Maximum workspace size, in bytes, for building TensorRT engines
◆ trt_min_subgraph_size
| int OrtTensorRTProviderOptions::trt_min_subgraph_size |
Minimum size (number of nodes) of a subgraph assigned to TensorRT
◆ user_compute_stream
| void* OrtTensorRTProviderOptions::user_compute_stream |
User-specified CUDA compute stream; used when has_user_compute_stream is nonzero
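When inference needs to share a CUDA stream with other GPU work, both stream fields are set together. A sketch assuming a stream created with the CUDA runtime API, reusing the trt_options struct from the example above:

```c
#include <cuda_runtime_api.h>

cudaStream_t stream;
cudaStreamCreate(&stream);

/* Hand the existing CUDA stream to the TensorRT provider. */
trt_options.has_user_compute_stream = 1;
trt_options.user_compute_stream = (void*)stream;
```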