/* kernels_cuda.cu */

#include "entity_data.h"
#include "kernels_gpu.h"
template<size_t K>
__global__ void
gpu_deriv_planar(gpuTextureObject_t F, const double * __restrict__ J,
    gpuTextureObject_t DM_tex, double * __restrict__ dF_dx,
    double * __restrict__ dF_dy, double * __restrict__ dF_dz,
    int32_t num_all_elems, int32_t* orients, int32_t dof_base)
{
    using KS = kernel_gpu_sizes<K>;

    int ofs_in_entity = (blockIdx.x * blockDim.x + threadIdx.x);
    if (ofs_in_entity >= num_all_elems*KS::num_bf)
        return;

    int32_t cur_dof_offset = dof_base + ofs_in_entity;

    int32_t iT = ofs_in_entity / KS::num_bf;
    int32_t elem_dof_base = dof_base + iT*KS::num_bf;
    int32_t iO = orients[iT];
    int32_t DM_row = ofs_in_entity % KS::num_bf;
    int32_t DM_orient = 3*KS::num_bf*KS::num_bf*iO;

    double accm_dF_dx = 0.0;
    double accm_dF_dy = 0.0;
    double accm_dF_dz = 0.0;
    int32_t jac_ofs = 9*iT;
    for (int32_t dof = 0; dof < KS::num_bf; dof++)
    {
        /* Derivative along the first reference coordinate. */
        int32_t d_ofs = DM_orient + DM_row + 3*KS::num_bf*dof;
        int32_t f_ofs = elem_dof_base + dof;
        double v = fetch_tex(DM_tex, d_ofs) * fetch_tex(F, f_ofs);
        accm_dF_dx += J[jac_ofs+0] * v;
        accm_dF_dy += J[jac_ofs+3] * v;
        accm_dF_dz += J[jac_ofs+6] * v;

        /* Derivative along the second reference coordinate. */
        d_ofs = DM_orient + DM_row + 3*KS::num_bf*dof + KS::num_bf;
        v = fetch_tex(DM_tex, d_ofs) * fetch_tex(F, f_ofs);
        accm_dF_dx += J[jac_ofs+1] * v;
        accm_dF_dy += J[jac_ofs+4] * v;
        accm_dF_dz += J[jac_ofs+7] * v;

        /* Derivative along the third reference coordinate. */
        d_ofs = DM_orient + DM_row + 3*KS::num_bf*dof + 2*KS::num_bf;
        v = fetch_tex(DM_tex, d_ofs) * fetch_tex(F, f_ofs);
        accm_dF_dx += J[jac_ofs+2] * v;
        accm_dF_dy += J[jac_ofs+5] * v;
        accm_dF_dz += J[jac_ofs+8] * v;
    }

    dF_dx[cur_dof_offset] = accm_dF_dx;
    dF_dy[cur_dof_offset] = accm_dF_dy;
    dF_dz[cur_dof_offset] = accm_dF_dz;
}
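
/* Launcher for gpu_deriv_planar on a single entity: one thread per DOF, with
 * the block count rounded up so every DOF is covered. Only the planar
 * (g_order == 1) case is handled; the curved-element path is still commented
 * out. */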
template<size_t K>
void
launch_deriv_kernel(entity_data_gpu& edg,
    gpuTextureObject_t f, double *df_dx, double* df_dy, double* df_dz)
{
    const auto THREADS_PER_BLOCK = kernel_gpu_sizes<K>::deriv_threads;
    auto num_blocks = edg.num_bf*edg.num_all_elems/THREADS_PER_BLOCK;
    if (edg.num_bf*edg.num_all_elems % THREADS_PER_BLOCK)
        num_blocks += 1;

    auto J = edg.jacobians.data();
    auto Dtex = edg.differentiation_matrices.get_texture();
    int32_t num_elems = edg.num_all_elems;
    auto orients = edg.element_orientation.data();
    auto num_orients = edg.element_orientation.size();

    if (edg.g_order == 1)
        gpu_deriv_planar<K><<<num_blocks, THREADS_PER_BLOCK>>>(f, J,
            Dtex, df_dx, df_dy, df_dz, num_elems, orients, edg.dof_base);
    //else
    //    compute_field_derivatives_kernel_curved<1>(ed, f, df_dx, df_dy, df_dz);
}
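
/* Entry point: dispatch on the approximation order, which must be a
 * compile-time constant to instantiate the templated kernel. */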
void
gpu_compute_field_derivatives(entity_data_gpu& edg,
    gpuTextureObject_t f, double *df_dx, double* df_dy, double* df_dz)
{
    switch (edg.a_order)
    {
        case 1:
            launch_deriv_kernel<1>(edg, f, df_dx, df_dy, df_dz);
            break;
        case 2:
            launch_deriv_kernel<2>(edg, f, df_dx, df_dy, df_dz);
            break;
        case 3:
            launch_deriv_kernel<3>(edg, f, df_dx, df_dy, df_dz);
            break;
        case 4:
            launch_deriv_kernel<4>(edg, f, df_dx, df_dy, df_dz);
            break;
        case 5:
            launch_deriv_kernel<5>(edg, f, df_dx, df_dy, df_dz);
            break;
        case 6:
            launch_deriv_kernel<6>(edg, f, df_dx, df_dy, df_dz);
            break;
        default:
            throw std::invalid_argument("compute_field_derivatives: invalid order");
    }
}
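
/* Apply the flux lifting operator on the planar elements of one entity. Each
 * thread computes one output DOF by contracting the orientation-dependent
 * lifting matrix in LM_tex with the face flux DOFs of its element, scaling by
 * the inverse element determinant and accumulating into lifted_flux. */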
template<size_t K>
__global__ void
gpu_lift_planar(gpuTextureObject_t flux, gpuTextureObject_t LM_tex,
    const double * __restrict__ dets, double * __restrict__ lifted_flux,
    int32_t num_all_elems, int32_t* orients, int32_t dof_base, int32_t flux_base)
{
    using KS = kernel_gpu_sizes<K>;

    /* One thread per *output* dof */
    int ofs_in_entity = (blockIdx.x * blockDim.x + threadIdx.x);
    if (ofs_in_entity >= num_all_elems*KS::num_bf)
        return;

    int32_t cur_dof_offset = dof_base + ofs_in_entity;

    int32_t iT = ofs_in_entity / KS::num_bf;
    int32_t elem_flux_base = flux_base + 4*iT*KS::num_fluxes;
    int32_t iO = orients[iT];
    int32_t LM_row = ofs_in_entity % KS::num_bf;
    int32_t LM_orient = 4*KS::num_bf*KS::num_fluxes*iO;
    double inv_det = 1./dets[iT];

    double acc = 0.0;
    for (int32_t dof = 0; dof < 4*KS::num_fluxes; dof++)
    {
        int32_t l_ofs = LM_orient + LM_row + KS::num_bf*dof;
        int32_t f_ofs = elem_flux_base + dof;
        acc += inv_det * fetch_tex(LM_tex, l_ofs) * fetch_tex(flux, f_ofs);
    }

    lifted_flux[cur_dof_offset] += acc;
}
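
/* Launcher for gpu_lift_planar on a single entity, one thread per output DOF.
 * As for the derivatives, only the planar (g_order == 1) case is handled. */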
template<size_t K>
void
launch_lift_kernel(entity_data_gpu& edg, gpuTextureObject_t f, double *out)
{
    const auto THREADS_PER_BLOCK = 256; //kernel_gpu_sizes<K>::deriv_threads;
    auto num_blocks = edg.num_bf*edg.num_all_elems/THREADS_PER_BLOCK;
    if (edg.num_bf*edg.num_all_elems % THREADS_PER_BLOCK)
        num_blocks += 1;

    auto Ltex = edg.lifting_matrices.get_texture();
    int32_t num_elems = edg.num_all_elems;
    auto orients = edg.element_orientation.data();
    auto num_orients = edg.element_orientation.size();
    auto dets = edg.cell_determinants.data();

    if (edg.g_order == 1)
        gpu_lift_planar<K><<<num_blocks, THREADS_PER_BLOCK>>>(f, Ltex,
            dets, out, num_elems, orients, edg.dof_base, edg.flux_base);
    //else
    //    compute_field_derivatives_kernel_curved<1>(ed, f, df_dx, df_dy, df_dz);
}
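
/* Entry point: dispatch flux lifting on the (compile-time) approximation
 * order. */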
void
gpu_compute_flux_lifting(entity_data_gpu& edg, gpuTextureObject_t f, double *out)
{
    switch (edg.a_order)
    {
        case 1:
            launch_lift_kernel<1>(edg, f, out);
            break;
        case 2:
            launch_lift_kernel<2>(edg, f, out);
            break;
        case 3:
            launch_lift_kernel<3>(edg, f, out);
            break;
        case 4:
            launch_lift_kernel<4>(edg, f, out);
            break;
        case 5:
            launch_lift_kernel<5>(edg, f, out);
            break;
        case 6:
            launch_lift_kernel<6>(edg, f, out);
            break;
        default:
            throw std::invalid_argument("compute_flux_lifting: invalid order");
    }
}