diff --git a/src/maxwell_cpu.cpp b/src/maxwell_cpu.cpp
index 2af0267f76cafae1a7af799c5abc4b5e3707441b..8ebd00e3651def056241cad3805d4777525b2e3e 100644
--- a/src/maxwell_cpu.cpp
+++ b/src/maxwell_cpu.cpp
@@ -146,7 +146,6 @@ compute_field_jumps(solver_state& state, const field& in)
 {
     size_t num_all_fluxes = num_elems_all_orientations(ed) * ed.num_faces_per_elem * ed.num_fluxes;
 
-    #pragma omp parallel for
     for (size_t i = 0; i < num_all_fluxes; i++)
     {
         auto lofs = i;
@@ -184,7 +183,6 @@ compute_field_jumps_E(solver_state& state, const field& in)
 {
     size_t num_all_fluxes = num_elems_all_orientations(ed) * ed.num_faces_per_elem * ed.num_fluxes;
 
-    #pragma omp parallel for
     for (size_t i = 0; i < num_all_fluxes; i++)
     {
         auto lofs = i;
@@ -216,7 +214,6 @@ compute_field_jumps_H(solver_state& state, const field& in)
 {
     size_t num_all_fluxes = num_elems_all_orientations(ed) * ed.num_faces_per_elem * ed.num_fluxes;
 
-    #pragma omp parallel for
     for (size_t i = 0; i < num_all_fluxes; i++)
     {
         auto lofs = i;
@@ -248,7 +245,6 @@ compute_fluxes_planar(solver_state& state)
 {
     size_t num_all_faces = num_elems_all_orientations(ed) * ed.num_faces_per_elem;
 
-    #pragma omp parallel for
     for (size_t iF = 0; iF < num_all_faces; iF++)
     {
         auto n = ed.normals.row(iF);
@@ -297,7 +293,6 @@ compute_fluxes_planar_E(solver_state& state)
 {
     size_t num_all_faces = num_elems_all_orientations(ed) * ed.num_faces_per_elem;
 
-    #pragma omp parallel for
     for (size_t iF = 0; iF < num_all_faces; iF++)
     {
         auto n = ed.normals.row(iF);
@@ -340,7 +335,6 @@ compute_fluxes_planar_H(solver_state& state)
 {
     size_t num_all_faces = num_elems_all_orientations(ed) * ed.num_faces_per_elem;
 
-    #pragma omp parallel for
     for (size_t iF = 0; iF < num_all_faces; iF++)
     {
         auto n = ed.normals.row(iF);
@@ -424,7 +418,6 @@ compute_fluxes_H(solver_state& state, const field& in, field& out)
 static void
 compute_euler_update(solver_state& state, const field& y, const field& k, double dt, field& out)
 {
-    #pragma omp parallel for
     for (size_t i = 0; i < out.num_dofs; i++)
     {
         auto CR = 1.0 - dt*state.matparams.sigma_over_epsilon[i];
@@ -434,7 +427,6 @@ compute_euler_update(solver_state& state, const field& y, const field& k, double
         out.Ez[i] = y.Ez[i]*CR + k.Ez[i]*CC;
     }
 
-    #pragma omp parallel for
     for (size_t i = 0; i < out.num_dofs; i++)
     {
         auto CC = dt*state.matparams.inv_mu[i];
@@ -447,7 +439,6 @@ compute_euler_update(solver_state& state, const field& y, const field& k, double
 static void
 compute_rk4_weighted_sum(solver_state& state, const field& in, double dt, field& out)
 {
-    #pragma omp parallel for
     for (size_t i = 0; i < out.num_dofs; i++)
     {
         auto CR = 1.0 - dt*state.matparams.sigma_over_epsilon[i];
@@ -457,7 +448,6 @@ compute_rk4_weighted_sum(solver_state& state, const field& in, double dt, field&
         out.Ez[i] = in.Ez[i]*CR + CC*(state.k1.Ez[i] + 2*state.k2.Ez[i] + 2*state.k3.Ez[i] + state.k4.Ez[i])/6;
     }
 
-    #pragma omp parallel for
     for (size_t i = 0; i < out.num_dofs; i++)
     {
         auto CC = dt*state.matparams.inv_mu[i];
@@ -480,7 +470,6 @@ leapfrog(solver_state& state)
     auto dt = state.delta_t;
     compute_curls_H(state, state.emf_curr, state.tmp);
     compute_fluxes_H(state, state.emf_curr, state.tmp);
-    #pragma omp parallel for
     for (size_t i = 0; i < state.emf_next.num_dofs; i++)
     {
         auto CRM = 1.0 - 0.5*dt*state.matparams.sigma_over_epsilon[i];
@@ -494,7 +483,6 @@ leapfrog(solver_state& state)
 
     compute_curls_E(state, state.emf_next, state.tmp);
     compute_fluxes_E(state, state.emf_next, state.tmp);
-    #pragma omp parallel for
     for (size_t i = 0; i < state.emf_next.num_dofs; i++)
     {
         auto CC = dt*state.matparams.inv_mu[i];
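
For context: every loop this patch touches has the same shape. Each iteration writes only its own index `i` (or face `iF`) of the output arrays, which is why the removed `#pragma omp parallel for` directives needed no synchronization clauses, and why removing them changes only scheduling, not results. Below is a minimal standalone sketch of that per-DOF update pattern; `field_sketch`, `euler_update_sketch` and `inv_epsilon` are illustrative stand-ins, not the solver's actual types.

// Sketch of the per-DOF forward Euler update pattern used throughout
// maxwell_cpu.cpp. Names here are hypothetical stand-ins.
// Build: g++ -fopenmp sketch.cpp
#include <cstddef>
#include <vector>

struct field_sketch {
    std::vector<double> Ex, Ey, Ez;   // electric-field DOFs only, for brevity
    size_t num_dofs = 0;
};

// Forward Euler step out = y*(1 - dt*sigma/eps) + k*(dt/eps), applied per DOF.
static void
euler_update_sketch(const std::vector<double>& sigma_over_epsilon,
                    const std::vector<double>& inv_epsilon,
                    const field_sketch& y, const field_sketch& k,
                    double dt, field_sketch& out)
{
    // Iteration i reads and writes only index i: no shared state, no races.
    // This is the kind of pragma the patch removes; deleting it (or building
    // without -fopenmp) makes the loop run serially with identical results.
    #pragma omp parallel for
    for (size_t i = 0; i < out.num_dofs; i++)
    {
        auto CR = 1.0 - dt*sigma_over_epsilon[i];  // conductive decay factor
        auto CC = dt*inv_epsilon[i];               // curl/source weighting
        out.Ex[i] = y.Ex[i]*CR + k.Ex[i]*CC;
        out.Ey[i] = y.Ey[i]*CR + k.Ey[i]*CC;
        out.Ez[i] = y.Ez[i]*CR + k.Ez[i]*CC;
    }
}

int main()
{
    const size_t n = 1000;
    std::vector<double> soe(n, 0.1), ieps(n, 1.0);
    field_sketch y{std::vector<double>(n, 1.0), std::vector<double>(n, 1.0),
                   std::vector<double>(n, 1.0), n};
    field_sketch k = y, out = y;
    euler_update_sketch(soe, ieps, y, k, 0.01, out);
    return 0;
}

Note that a binary built without `-fopenmp` ignores these pragmas entirely, and `OMP_NUM_THREADS=1` serializes them at run time, so either would reproduce the post-patch serial behavior without a source change.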