/* MagmaDNN 1.0 — C++ neural network framework.
 * utilities_internal.h: internal debugging, error-checking, and
 * shape-validation utilities (documentation page extracted from Doxygen).
 */
9 #pragma once
10 
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <set>
#include <vector>
17 
18 #if defined(_HAS_CUDA_)
19 #include "cudnn.h"
20 #include <cuda.h>
21 #include <curand.h>
22 #include <cuda_runtime_api.h>
23 
24 #define cudaErrchk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
25 inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
26  if (code != cudaSuccess) {
27  fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
28  if (abort) exit(code);
29  }
30 }
31 
32 #define cudnnErrchk(ans) { cudnnAssert((ans), __FILE__, __LINE__); }
33 inline void cudnnAssert(cudnnStatus_t code, const char *file, int line, bool abort=true) {
34  if (code != CUDNN_STATUS_SUCCESS) {
35  fprintf(stderr, "CuDNNassert: %s %s %d\n", cudnnGetErrorString(code), file, line);
36  if (abort) exit(code);
37  }
38 }
39 
40 #define curandErrchk(ans) { curandAssert((ans), __FILE__, __LINE__); }
41 inline void curandAssert(curandStatus_t code, const char *file, int line, bool abort=true) {
42  if (code != CURAND_STATUS_SUCCESS) {
43  fprintf(stderr, "CuRandAssert: %d %s %d\n", code, file, line);
44  }
45 }
46 
47 #endif
48 
49 
50 
/* Shape-classification helpers for Tensor pointers.
 * A tensor whose total element count is 1 is treated as a scalar regardless
 * of rank; a vector is rank-1 with more than one element. */
#define T_IS_SCALAR(tensor_ptr) ((tensor_ptr)->get_size() == 1)
#define T_IS_VECTOR(tensor_ptr) ((tensor_ptr)->get_size() != 1 && ((tensor_ptr)->get_shape().size() == 1))
#define T_IS_MATRIX(tensor_ptr) ((tensor_ptr)->get_shape().size() == 2)
#define T_IS_N_DIMENSIONAL(tensor_ptr, N) ((tensor_ptr)->get_shape().size() == N)

/* Same classification applied to Operation pointers via their output shape. */
#define OP_IS_SCALAR(op_ptr) ((op_ptr)->get_output_size() == 1)
#define OP_IS_VECTOR(op_ptr) (((op_ptr)->get_output_size() != 1) && ((op_ptr)->get_output_shape().size() == 1))
#define OP_IS_MATRIX(op_ptr) ((op_ptr)->get_output_shape().size() == 2)
#define OP_IS_N_DIMENSIONAL(op_ptr, N) ((op_ptr)->get_output_shape().size() == N)

/* True when both operands report the same memory type (presumably host vs.
 * device placement — confirm against the project's memory_t definition). */
#define T_IS_SAME_MEMORY_TYPE(x_ptr,y_ptr) ((x_ptr)->get_memory_type() == (y_ptr)->get_memory_type())
#define OP_IS_SAME_MEMORY_TYPE(x_ptr, y_ptr) ((x_ptr)->get_memory_type() == (y_ptr)->get_memory_type())
62 
63 
namespace magmadnn {
namespace internal {

/** printf-style debug output helper (defined in utilities_internal.cpp).
 *  NOTE(review): return value semantics are presumably printf-like
 *  (characters written) — confirm in the definition.
 */
int debugf(const char *fmt, ...);

/** Print a vector of unsigned ints delimited as e.g. "{1,2,3}".
 *  @param vec   values to print
 *  @param debug presumably routes output through the debug channel when
 *               true — TODO confirm against the definition
 *  @param begin opening delimiter character
 *  @param end   closing delimiter character
 *  @param delim separator placed between elements
 */
void print_vector(const std::vector<unsigned int>& vec, bool debug=true, char begin='{', char end='}', char delim=',');

/* Disabled declarations kept for reference (no implementations in view):
template <typename T>
void print_tensor(const Tensor<T>& t, bool print_flat=false, bool debug=true, const char *begin="{", const char *end="}\n", const char *delim=", ");

template <typename T>
void print_compute_graph(op::Operation<T> *node, bool debug=true);
*/

#if defined(_HAS_CUDA_)
/* Map a scalar type (selected via the sample value's type T) to the
 * corresponding cudnnDataType_t enumerator. */
template <typename T>
cudnnDataType_t get_cudnn_data_type(T val);
#endif

} // namespace internal
} // namespace magmadnn
/* Cross-references from the generated documentation:
 *   int debugf(const char *fmt, ...) — defined in utilities_internal.cpp:14
 *   (an additional tooltip referenced addop.cpp:11 — symbol unclear from here)
 */