#include <compilation_cache.h>

Member functions:

    ManualCudaCache() = default
    ManualCudaCache(const ManualCudaCacheProto &buf)
    ManualCudaCacheProto toProtobuf() const
    void cacheKernel(const std::string &id,
                     const std::vector<const DLTensor*> &inputs,
                     const std::vector<const DLTensor*> &outputs,
                     const std::string &kernelSpecializedName,
                     const std::vector<int> &kernelParameters,
                     const std::string &cudaSource,
                     const Grid &grid,
                     const Block &block)
    std::unique_ptr<CudaCache::RetrievalResult> retrieveKernel(
                     const std::string &id,
                     const std::vector<const DLTensor*> &inputs,
                     const std::vector<const DLTensor*> &outputs) const
    template<typename C, typename TensorTy>
    auto searchKernelImpl(C &c, const std::string &id,
                          const std::vector<TensorTy> &inputs,
                          const std::vector<TensorTy> &outputs)
        -> decltype(c.searchKernel(id, inputs, outputs))
    size_t size() const
    void clear()

Static private member functions:

    static std::shared_ptr<ManualCudaCache> &getGlobalSharedCache()
    template<typename C, typename InputTy>
    static auto searchKernelImpl(C &c, const std::string &id,
                                 const std::vector<InputTy> &inputs,
                                 const std::vector<InputTy> &outputs)
        -> decltype(c.searchKernel(id, inputs, outputs))
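
As a quick orientation, here is a minimal sketch using only the defaulted constructor together with size() and clear(); the zero-entries expectation for a freshly constructed cache is an assumption, not something stated on this page:

    #include <compilation_cache.h>

    void smokeTest() {
      tc::ManualCudaCache cache;   // defaulted constructor
      size_t n = cache.size();     // entry count; assumed to be 0 for a new cache
      cache.clear();               // drop any cached entries
      (void)n;
    }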
  
  
      
        
tc::ManualCudaCache::ManualCudaCache()   [default]
  
 
 
      
        
tc::ManualCudaCache::ManualCudaCache(const ManualCudaCacheProto &buf)
        
      
 
 
      
        
void tc::ManualCudaCache::cacheKernel(
        const std::string &id,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs,
        const std::string &kernelSpecializedName,
        const std::vector<int> &kernelParameters,
        const std::string &cudaSource,
        const Grid &grid,
        const Block &block)
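
A hedged sketch of a cacheKernel call based only on the signature above. The id, specialized kernel name, parameter values, and CUDA source are illustrative placeholders, and the tc:: qualification of Grid and Block is assumed:

    // Sketch only: inputs/outputs are DLTensor descriptors produced elsewhere,
    // and grid/block describe the launch configuration of the hand-written kernel.
    void storeHandWrittenKernel(
        tc::ManualCudaCache &cache,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs,
        const tc::Grid &grid,
        const tc::Block &block) {
      cache.cacheKernel(
          "matmul",                                 // id (illustrative)
          inputs,
          outputs,
          "matmul_512_512",                         // kernelSpecializedName (illustrative)
          {512, 512},                               // kernelParameters (illustrative)
          "/* hand-written CUDA kernel source */",  // cudaSource (placeholder)
          grid,
          block);
    }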
        
      
 
 
  
  
      
        
static std::shared_ptr<ManualCudaCache> &tc::ManualCudaCache::getGlobalSharedCache()   [static, private]
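
getGlobalSharedCache() returns a mutable reference to a process-wide shared_ptr. Below is a generic sketch of the accessor pattern that signature implies; it illustrates the pattern only, not the library's actual implementation:

    #include <memory>

    struct Cache;  // stand-in for ManualCudaCache in this sketch

    std::shared_ptr<Cache> &getGlobalSharedCache() {
      // One shared instance per process, created lazily on first use.
      static std::shared_ptr<Cache> instance;
      return instance;
    }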
  
 
 
      
        
std::unique_ptr<CudaCache::RetrievalResult> tc::ManualCudaCache::retrieveKernel(
        const std::string &id,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs) const
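
A hedged sketch of the lookup path. The only behaviour assumed here is that an empty unique_ptr signals a cache miss; what RetrievalResult carries is not detailed on this page:

    // Sketch: true when a previously cached kernel matches the given id and tensors.
    bool haveCachedKernel(
        const tc::ManualCudaCache &cache,
        const std::string &id,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs) {
      auto result = cache.retrieveKernel(id, inputs, outputs);
      return result != nullptr;  // assumed: empty result means no matching entry
    }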
        
      
 
 
  
  
      
        
CachedEntry *tc::ManualCudaCache::searchKernel(
        const std::string &id,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs)   [private]
  
 
 
  
  
      
        
const CachedEntry *tc::ManualCudaCache::searchKernel(
        const std::string &id,
        const std::vector<const DLTensor*> &inputs,
        const std::vector<const DLTensor*> &outputs) const   [private]
  
 
 
template<typename C, typename TensorTy>
auto tc::ManualCudaCache::searchKernelImpl(
        C &c,
        const std::string &id,
        const std::vector<TensorTy> &inputs,
        const std::vector<TensorTy> &outputs)
    -> decltype(c.searchKernel(id, inputs, outputs))
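
The trailing return type -> decltype(c.searchKernel(id, inputs, outputs)) lets one helper template serve both searchKernel overloads: when C is deduced as const, the const overload is selected and the deduced result type becomes a pointer-to-const. A self-contained sketch of that technique with toy types (not the library's code):

    #include <string>
    #include <vector>

    struct Entry {};

    struct ToyCache {
      std::vector<Entry> entries;

      Entry* searchKernel(const std::string& /*id*/) {
        return entries.empty() ? nullptr : &entries.front();
      }
      const Entry* searchKernel(const std::string& /*id*/) const {
        return entries.empty() ? nullptr : &entries.front();
      }

      // One template covers both overloads; the constness of C picks the result type.
      template <typename C>
      static auto searchImpl(C &c, const std::string &id)
          -> decltype(c.searchKernel(id)) {
        return c.searchKernel(id);
      }
    };

    void demo() {
      ToyCache cache;
      const ToyCache &constCache = cache;
      Entry* mutableHit = ToyCache::searchImpl(cache, "id");          // non-const overload
      const Entry* constHit = ToyCache::searchImpl(constCache, "id"); // const overload
      (void)mutableHit;
      (void)constHit;
    }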
        
      
 
 
template<typename C, typename InputTy>
static auto tc::ManualCudaCache::searchKernelImpl(
        C &c,
        const std::string &id,
        const std::vector<InputTy> &inputs,
        const std::vector<InputTy> &outputs)
    -> decltype(c.searchKernel(id, inputs, outputs))   [static, private]
  
 
 
      
        
ManualCudaCacheProto tc::ManualCudaCache::toProtobuf() const
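
toProtobuf() and the ManualCudaCacheProto constructor documented above together suggest a snapshot/restore round trip. A hedged sketch based only on those two signatures (persisting the protobuf message to disk is out of scope here):

    void roundTrip(const tc::ManualCudaCache &cache) {
      auto proto = cache.toProtobuf();      // ManualCudaCacheProto snapshot
      tc::ManualCudaCache restored(proto);  // rebuild an equivalent cache from it
      (void)restored;
    }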
        
      
 
 
The documentation for this class was generated from the following files: