前提・実現したいこと
dockerコンテナ内でubuntu16.04のイメージを獲得し,
python3.6.7で環境構築している際に,cuda9.0を入れてみたところパスが通っていないとエラーが出ました.
エラーは対象のlib内でmakeコマンドを入力した際に,別のエラーで,setup_linux.pyを実行しろとのことだったので,実行した結果今回のエラーが出ました.
発生している問題・エラーメッセージ
OSError: The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDAHOME
該当のソースコード
python3.6.7
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np


def find_in_path(name, path):
    """Return the absolute path of file *name* found on *path*, or None.

    *path* is an os.pathsep-separated list of directories (like $PATH).
    """
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for dir in path.split(os.pathsep):
        binpath = pjoin(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME (or CUDA_HOME) env variable.
    If not found, everything is based on finding 'nvcc' in the PATH.

    Raises EnvironmentError when nvcc or any expected CUDA directory
    cannot be found.
    """
    # First check whether an explicit CUDA home is set. The original
    # script only honoured CUDAHOME, but NVIDIA's install guide tells
    # users to export CUDA_HOME — accept both, CUDAHOME taking priority
    # for backward compatibility.
    home = os.environ.get('CUDAHOME', os.environ.get('CUDA_HOME'))
    if home is not None:
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # Otherwise, search the PATH for nvcc (plus the conventional
        # default install location as a fallback).
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # home is two levels up from .../bin/nvcc
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Sanity-check that every expected path actually exists.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig


CUDA = locate_cuda()


# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Very old numpy releases only provided get_numpy_include().
    numpy_include = np.get_numpy_include()


def customize_compiler_for_nvcc(self):
    """Inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on.
    """
    # Tell the compiler it can process .cu files.
    self.src_extensions.append('.cu')

    # Save references to the default compiler_so and _compile methods.
    # NOTE: the original bound this to the name `super`, shadowing the
    # builtin; renamed for clarity.
    default_compiler_so = self.compiler_so
    default_compile = self._compile

    # Now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # Use nvcc for .cu files.
            self.set_executable('compiler_so', CUDA['nvcc'])
            # Use only a subset of the extra_postargs, which are 1-1
            # translated from the extra_compile_args in the Extension class.
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        default_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # Reset the default compiler_so, which we might have changed for cuda.
        self.compiler_so = default_compiler_so

    # Inject our redefined _compile method into the instance.
    self._compile = _compile


# Run the customize_compiler hook just before extensions are built.
class custom_build_ext(build_ext):
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


ext_modules = [
    Extension(
        "cpu_nms",
        ["cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include],
    ),
    Extension(
        'gpu_nms',
        ['nms_kernel.cu', 'gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        # This syntax is specific to this build system: we're only going
        # to use certain compiler args with nvcc and not with gcc. The
        # implementation of this trick is in customize_compiler_for_nvcc().
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs=[numpy_include, CUDA['include']],
    ),
]

setup(
    name='nms',
    ext_modules=ext_modules,
    # Inject our custom build_ext trigger.
    cmdclass={'build_ext': custom_build_ext},
)
試したこと
調べて,~/.bashrcにパスを追加する以下のコマンドを記述しました.
export CUDA_HOME=/usr/local/cuda
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64
export PATH=${CUDA_HOME}/bin:${PATH}
しかし,入力しても同じエラーが出ます.
補足情報(FW/ツールのバージョンなど)
ubuntu16.04
python3.6.7
torch 1.5.0
あなたの回答
tips
プレビュー