Commit be5c3c1

fix CPU only build with memman

1 parent 1ddf529, commit be5c3c1

7 files changed: +24 -18 lines

CHANGELOG.md (+1 -1)

@@ -1,6 +1,6 @@
 # Change Log
 
-## [master] - 2020-05-07
+## [master] - 2020-05-19
 
 ### Changed
 
README.md (+1 -1)

@@ -49,7 +49,7 @@ We visualized a sparse tensor network operation on a sparse tensor, convolution,
 
 - Ubuntu 14.04 or higher
 - CUDA 10.1 or higher
-- pytorch 1.3 or higher
+- pytorch 1.5 or higher
 - python 3.6 or higher
 - GCC 7 or higher
 

setup.py (+3 -1)

@@ -131,7 +131,9 @@ def _argparse(pattern, argv, is_flag=True):
 
 # extra_compile_args+=['-g'] # Uncomment for debugging
 if CPU_ONLY and not FORCE_CUDA:
-    print("\nCPU_ONLY build set")
+    print("--------------------------------")
+    print("| WARNING: CPU_ONLY build set |")
+    print("--------------------------------")
     compile_args += ["CPU_ONLY=1"]
     extra_compile_args += ["-DCPU_ONLY"]
     Extension = CppExtension

src/coords_manager.hpp (+9 -9)

@@ -122,18 +122,18 @@ template <typename MapType = CoordsToIndexMap> class CoordsManager {
   unordered_map<InOutMapKey, InOutMaps<int>, InOutMapKeyHash> in_maps;
   unordered_map<InOutMapKey, InOutMaps<int>, InOutMapKeyHash> out_maps;
 
-  CoordsManager(){
-    gpu_memory_manager = std::make_shared<GPUMemoryManager>();
-  };
-  CoordsManager(int num_threads) {
-    omp_set_dynamic(0);
-    omp_set_num_threads(num_threads);
-  }
   CoordsManager(int num_threads, MemoryManagerBackend backend) {
-    omp_set_dynamic(0);
-    omp_set_num_threads(num_threads);
+    if (num_threads > 0) {
+      omp_set_dynamic(0);
+      omp_set_num_threads(num_threads);
+    }
+#ifndef CPU_ONLY
     gpu_memory_manager = std::make_shared<GPUMemoryManager>(backend);
+#endif
   }
+  CoordsManager(int num_threads): CoordsManager(num_threads, PYTORCH) {}
+  CoordsManager(): CoordsManager(-1, PYTORCH) {}
+
   ~CoordsManager() { clear(); }
 
   void printDiagnostics(py::object py_coords_key) const;
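
The constructor consolidation above relies on two standard C++11 techniques: delegating constructors, so the zero- and one-argument forms simply forward to the two-argument one with a PYTORCH default, and an #ifndef CPU_ONLY guard so the GPU memory manager is never referenced in a CPU-only build. A minimal, self-contained sketch of the same pattern (Manager and GpuPool are hypothetical stand-ins, not the actual CoordsManager or GPUMemoryManager):

#include <iostream>
#include <memory>

enum Backend { CUDA = 0, PYTORCH = 1 }; // mirrors MemoryManagerBackend

#ifndef CPU_ONLY
// Stand-in for a GPU-side allocator that exists only in CUDA builds.
struct GpuPool {
  explicit GpuPool(Backend b) { std::cout << "GPU pool, backend " << b << "\n"; }
};
#endif

class Manager {
public:
  // All real work happens in the most specific constructor.
  Manager(int num_threads, Backend backend) {
    if (num_threads > 0) {
      // the real class pins OpenMP threads here via omp_set_num_threads
      std::cout << "using " << num_threads << " threads\n";
    }
#ifndef CPU_ONLY
    pool_ = std::make_shared<GpuPool>(backend); // compiled out under CPU_ONLY
#else
    (void)backend; // backend has no effect in a CPU-only build
#endif
  }
  // The remaining constructors only choose defaults and delegate.
  explicit Manager(int num_threads) : Manager(num_threads, PYTORCH) {}
  Manager() : Manager(-1, PYTORCH) {}

private:
#ifndef CPU_ONLY
  std::shared_ptr<GpuPool> pool_;
#endif
};

int main() {
  Manager a;          // defaults: no thread pinning, PYTORCH backend
  Manager b(4);       // 4 OpenMP threads, PYTORCH backend
  Manager c(4, CUDA); // 4 threads, CUDA backend (ignored when built with -DCPU_ONLY)
}

The same source compiles with or without -DCPU_ONLY; in the CPU-only case the backend argument is accepted but has no effect, matching the comment added in tests/coords.py below.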

src/gpu_memory_manager.hpp (-2)

@@ -41,8 +41,6 @@ namespace minkowski {
 
 using std::vector;
 
-enum MemoryManagerBackend { CUDA = 0, PYTORCH = 1 };
-
 class GPUMemoryManager {
 private:
   int initial_size = 256;

src/types.hpp (+3)

@@ -97,6 +97,9 @@ using InOutMapsRefPair = pair<InOutMaps<Itype> &, InOutMaps<Itype> &>;
 template <typename Itype>
 using pInOutMapsRefPair = pair<pInOutMaps<Itype> &, pInOutMaps<Itype> &>;
 
+// GPU memory manager backend. No effect with CPU_ONLY build
+enum MemoryManagerBackend { CUDA = 0, PYTORCH = 1 };
+
 // FNV64-1a
 // uint64_t for unsigned long, must use CXX -m64
 template <typename T> uint64_t hash_vec(T p) {
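
Relocating the enum is what unblocks the CPU-only build: translation units compiled with CPU_ONLY never include gpu_memory_manager.hpp, yet the consolidated CoordsManager constructor above still names MemoryManagerBackend in its signature, so the type has to live in a header both build modes see. A schematic sketch of that header split, collapsed into one file for brevity (GPUMemoryManagerSketch and CoordsManagerSketch are hypothetical placeholders, not the real classes):

#include <memory>

// --- types.hpp: shared declarations, visible in both build modes ---
enum MemoryManagerBackend { CUDA = 0, PYTORCH = 1 };

// --- gpu_memory_manager.hpp: only meaningful when CUDA is available ---
#ifndef CPU_ONLY
struct GPUMemoryManagerSketch {
  explicit GPUMemoryManagerSketch(MemoryManagerBackend b) : backend(b) {}
  MemoryManagerBackend backend;
};
#endif

// --- coords_manager.hpp: compiled in both modes ---
// The parameter type must be visible here even when the GPU manager is not,
// which is why the enum moved to the shared header.
struct CoordsManagerSketch {
  explicit CoordsManagerSketch(MemoryManagerBackend backend) {
#ifndef CPU_ONLY
    gpu = std::make_unique<GPUMemoryManagerSketch>(backend);
#else
    (void)backend; // backend has no effect in a CPU_ONLY build
#endif
  }
#ifndef CPU_ONLY
  std::unique_ptr<GPUMemoryManagerSketch> gpu;
#endif
};

int main() { CoordsManagerSketch cm(PYTORCH); }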

tests/coords.py (+7 -4)

@@ -169,11 +169,14 @@ def test_batch_size_initialize(self):
         self.assertTrue(cm.get_batch_size() == 2)
 
     def test_memory_manager_backend(self):
-        CoordsManager(memory_manager_backend=MemoryManagerBackend.CUDA, D=2)
-        CoordsManager(memory_manager_backend=MemoryManagerBackend.PYTORCH, D=2)
-
+        # Set the global GPU memory manager backend. By default PYTORCH.
         ME.set_memory_manager_backend(MemoryManagerBackend.PYTORCH)
-        CoordsManager(D=2)
+        ME.set_memory_manager_backend(MemoryManagerBackend.CUDA)
+
+        # Create a coords man with the specified GPU memory manager backend.
+        # No effect with CPU_ONLY build
+        cm = CoordsManager(memory_manager_backend=MemoryManagerBackend.CUDA, D=2)
+        cm = CoordsManager(memory_manager_backend=MemoryManagerBackend.PYTORCH, D=2)
 
 
 if __name__ == '__main__':
