Tensor Comprehensions
|
This is the complete list of members for tc::CudaCache, including all inherited members.
Cache< CudaCache > class | tc::CudaCache | friend |
cacheEnabled() | tc::Cache< CudaCache > | static |
cacheKernel(const std::string &id, const MappingOptions &options, const std::vector< const DLTensor * > &inputs, const std::vector< const DLTensor * > &outputs, const std::string &kernelSpecializedName, const std::vector< int > &kernelParameters, const std::string &cudaSource, const Grid &grid, const Block &block) | tc::CudaCache | |
clear() | tc::Cache< CudaCache > | |
CudaCache()=default | tc::CudaCache | |
CudaCache(const CudaCacheProto &buf) | tc::CudaCache | |
disableCache() | tc::Cache< CudaCache > | static |
dumpCacheToProtobuf(const std::string &filename) | tc::Cache< CudaCache > | static |
enableCache() | tc::Cache< CudaCache > | static |
entries_ | tc::CudaCache | private |
getCache() | tc::Cache< CudaCache > | static |
getGlobalSharedCache() | tc::CudaCache | private static |
loadCacheFromProtobuf(const std::string &filename) | tc::Cache< CudaCache > | static |
loadCacheFromProtobuf(const Protobuf &buf) | tc::Cache< CudaCache > | static |
mtx_ | tc::Cache< CudaCache > | mutable protected |
numberAttemptedRetrievals | tc::Cache< CudaCache > | mutable |
numberCacheAttemps | tc::Cache< CudaCache > | mutable |
numberSuccessfulRetrievals | tc::Cache< CudaCache > | mutable |
Protobuf typedef | tc::CudaCache | private |
removeEntriesNotInOptionsCache(const OptionsCache &oc) | tc::CudaCache | |
retrieveKernel(const std::string &id, const MappingOptions &options, const std::vector< const DLTensor * > &inputs, const std::vector< const DLTensor * > &outputs) const | tc::CudaCache | |
searchKernel(const std::string &id, const MappingOptions &options, const std::vector< detail::TensorInfo > &inputs, const std::vector< detail::TensorInfo > &outputs) | tc::CudaCache | private |
searchKernel(const std::string &id, const MappingOptions &options, const std::vector< const DLTensor * > &inputs, const std::vector< const DLTensor * > &outputs) | tc::CudaCache | private |
searchKernel(const std::string &id, const MappingOptions &options, const std::vector< const DLTensor * > &inputs, const std::vector< const DLTensor * > &outputs) const | tc::CudaCache | private |
searchKernelImpl(C &c, const std::string &id, const MappingOptions &options, const std::vector< TensorTy > &inputs, const std::vector< TensorTy > &outputs) -> decltype(c.searchKernel(id, options, inputs, outputs)) | tc::CudaCache | privatestatic |
searchKernelImpl(C &c, const std::string &id, const MappingOptions &options, const std::vector< InputTy > &inputs, const std::vector< InputTy > &outputs) -> decltype(c.searchKernel(id, options, inputs, outputs)) | tc::CudaCache | |
size() const | tc::Cache< CudaCache > | |
toProtobuf() const | tc::CudaCache |