#if defined(_HAS_CUDA_)

#define MAGMA_SGEMM_ROWMAJOR(A, B, C, m, n, k, alpha, beta, transf_A, transf_B, lda, ldb, ldc) \
    magma_sgemm(transf_B, transf_A, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)

#define MAGMA_DGEMM_ROWMAJOR(A, B, C, m, n, k, alpha, beta, transf_A, transf_B, lda, ldb, ldc) \
    magma_dgemm(transf_B, transf_A, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)

void matmul(T alpha, bool trans_A, Tensor<T> *A,
            bool trans_B, Tensor<T> *B, T beta, Tensor<T> *C);
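The ROWMAJOR macros above rely on the identity (A·B)^T = B^T·A^T: a row-major matrix handed to a column-major routine is read as its transpose, so swapping the operands and exchanging m and n makes a column-major GEMM write the row-major product directly into C. Below is a minimal host-side sketch of the same argument swap; the gemm_colmajor helper and the concrete values are illustrative stand-ins and not part of the library (magma_sgemm itself operates on device memory).

#include <cassert>
#include <vector>

// Naive column-major GEMM: C = alpha * A * B + beta * C, with A (m x k),
// B (k x n), C (m x n) all stored column-major. Stands in for
// magma_sgemm in this host-side illustration (hypothetical helper).
static void gemm_colmajor(int m, int n, int k, float alpha,
                          const float *A, int lda,
                          const float *B, int ldb,
                          float beta, float *C, int ldc) {
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < m; ++i) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p) {
                acc += A[i + p * lda] * B[p + j * ldb];
            }
            C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
        }
    }
}

int main() {
    // Row-major operands: A is 2x3, B is 3x2, so C = A*B is 2x2.
    const int m = 2, n = 2, k = 3;
    std::vector<float> A = {1, 2, 3,
                            4, 5, 6};      // row-major, leading dimension k
    std::vector<float> B = {7,  8,
                            9, 10,
                            11, 12};       // row-major, leading dimension n
    std::vector<float> C(m * n, 0.0f);     // row-major, leading dimension n

    // Same argument swap as MAGMA_SGEMM_ROWMAJOR: pass B before A,
    // exchange m and n, and keep the row-major leading dimensions.
    // The column-major routine then computes C^T = B^T * A^T, which is
    // exactly the row-major layout of A * B.
    gemm_colmajor(n, m, k, 1.0f, B.data(), n, A.data(), k, 0.0f, C.data(), n);

    // Expected row-major result: [[58, 64], [139, 154]].
    assert(C[0] == 58 && C[1] == 64 && C[2] == 139 && C[3] == 154);
    return 0;
}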
MatmulOp<T> *matmul(Operation<T> *a, Operation<T> *b, bool needs_grad)
Definition: matmulop.cpp:128