onnxruntime: add option to link full protobuf, not protobuf-lite

This option can be used to fix importing tensorflow and onnxruntime in the
same Python interpreter.
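
Since withFullProtobuf is an ordinary package argument, downstream users can
enable it via override. A minimal overlay sketch, assuming a nixpkgs tree that
already contains this change (the file path and overlay wiring are
illustrative; only the withFullProtobuf argument comes from this commit):

# overlays/onnxruntime-full-protobuf.nix (hypothetical path)
# Link onnxruntime against the full protobuf runtime instead of protobuf-lite.
final: prev: {
  onnxruntime = prev.onnxruntime.override { withFullProtobuf = true; };
}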

Authored by Martin Weinelt, committed by Pavol Rusnak
5f441444 e7701fc6

+2 -1
pkgs/by-name/on/onnxruntime/package.nix
···
   pythonSupport ? true,
   cudaSupport ? config.cudaSupport,
   ncclSupport ? config.cudaSupport,
+  withFullProtobuf ? false,
   cudaPackages ? { },
 }@inputs:

···
   (lib.cmakeFeature "ONNX_CUSTOM_PROTOC_EXECUTABLE" (lib.getExe protobuf))
   (lib.cmakeBool "onnxruntime_BUILD_SHARED_LIB" true)
   (lib.cmakeBool "onnxruntime_BUILD_UNIT_TESTS" doCheck)
-  (lib.cmakeBool "onnxruntime_USE_FULL_PROTOBUF" false)
+  (lib.cmakeBool "onnxruntime_USE_FULL_PROTOBUF" withFullProtobuf)
   (lib.cmakeBool "onnxruntime_USE_CUDA" cudaSupport)
   (lib.cmakeBool "onnxruntime_USE_NCCL" (cudaSupport && ncclSupport))
   (lib.cmakeBool "onnxruntime_ENABLE_LTO" (!cudaSupport || cudaPackages.cudaOlder "12.8"))
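
The override can also be exercised ad hoc, e.g. with nix-build, to confirm the
package still builds with the onnxruntime_USE_FULL_PROTOBUF CMake flag enabled
(a sketch; the file name is hypothetical and <nixpkgs> must point at a tree
that includes this change):

# onnxruntime-full-protobuf.nix (hypothetical file name)
# Evaluates to onnxruntime linked against the full protobuf runtime.
with import <nixpkgs> { };
onnxruntime.override { withFullProtobuf = true; }

Build it with: nix-build onnxruntime-full-protobuf.nix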