#pragma once

#include <array>
#include <cassert>
#include <cmath>
#include <typeinfo>
#include <vector>

#include <general.hh>
#include <grid_fft.hh>

//! convolution class, respecting Orszag's 3/2 rule
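//
// Minimal usage sketch (illustrative only): convolve two Hessian components of a
// potential field "phi" and accumulate the result into "res". The grids phi and res
// (both Grid_FFT<real_t> of size N) and the op-lambda are hypothetical; only the
// OrszagConvolver interface declared below is real.
//
//   std::array<size_t, 3> N{256, 256, 256};
//   std::array<real_t, 3> L{100.0, 100.0, 100.0};
//   OrszagConvolver<real_t> conv(N, L);
//   // res += (d^2 phi/dx^2) * (d^2 phi/dy^2), dealiased via 3/2-padding
//   conv.convolve_Hessians(phi, {0, 0}, phi, {1, 1}, res,
//                          [](ccomplex_t conv_val, ccomplex_t prev) { return prev + conv_val; });
//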
template <typename data_t>
class OrszagConvolver
{
protected:
    Grid_FFT<data_t> *f1p_, *f2p_;  //!< padded (3/2-sized) temporaries for the two factors
    Grid_FFT<data_t> *fbuf_;        //!< unpadded buffer grid, needed for MPI and for triple convolutions

    std::array<size_t, 3> np_;      //!< dimensions of the padded grids (3N/2 along each axis)
    std::array<real_t, 3> length_;  //!< physical box size

    ccomplex_t *crecvbuf_;          //!< MPI receive buffer for k-space slabs (complex view)
    real_t *recvbuf_;               //!< real-valued view of the same buffer
    size_t maxslicesz_;             //!< size of one k-space slab of the padded grid, in units of real_t
    std::vector<ptrdiff_t> offsets_, offsetsp_; //!< per-task slab offsets of the unpadded / padded grids
    std::vector<size_t> sizes_, sizesp_;        //!< per-task slab sizes of the unpadded / padded grids

private:
    //! find the MPI task that owns a given global slab index, based on the offset table
    int get_task(ptrdiff_t index, const std::vector<ptrdiff_t> &offsets, const std::vector<size_t> &sizes, const int ntasks) const
    {
        int itask = 0;
        while (itask < ntasks - 1 && offsets[itask + 1] <= index) ++itask;
        return itask;
    }

public:
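    //! construct the convolver for an unpadded grid of size N in a box of side lengths L;
    //! allocates the 3/2-padded temporaries and, with MPI, gathers the slab decomposition
    //! of both the unpadded and the padded grids from all tasks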
    OrszagConvolver(const std::array<size_t, 3> &N, const std::array<real_t, 3> &L)
        : np_({3 * N[0] / 2, 3 * N[1] / 2, 3 * N[2] / 2}), length_(L)
    {
        //... create temporaries
        f1p_ = new Grid_FFT<data_t>(np_, length_, kspace_id);
        f2p_ = new Grid_FFT<data_t>(np_, length_, kspace_id);
        fbuf_ = new Grid_FFT<data_t>(N, length_, kspace_id); // needed for MPI, or for triple conv.

#if defined(USE_MPI)
        maxslicesz_ = f1p_->sizes_[1] * f1p_->sizes_[3] * 2;

        crecvbuf_ = new ccomplex_t[maxslicesz_ / 2];
        recvbuf_ = reinterpret_cast<real_t *>(&crecvbuf_[0]);

        int ntasks(MPI_Get_size());

        offsets_.assign(ntasks, 0);
        offsetsp_.assign(ntasks, 0);
        sizes_.assign(ntasks, 0);
        sizesp_.assign(ntasks, 0);

        size_t tsize = N[0], tsizep = f1p_->size(0);

        //... gather slab offsets and sizes of the unpadded (fbuf_) and padded (f1p_) grids
        MPI_Allgather(&fbuf_->local_1_start_, 1, MPI_LONG_LONG, &offsets_[0], 1,
                      MPI_LONG_LONG, MPI_COMM_WORLD);
        MPI_Allgather(&f1p_->local_1_start_, 1, MPI_LONG_LONG, &offsetsp_[0], 1,
                      MPI_LONG_LONG, MPI_COMM_WORLD);
        MPI_Allgather(&tsize, 1, MPI_LONG_LONG, &sizes_[0], 1, MPI_LONG_LONG,
                      MPI_COMM_WORLD);
        MPI_Allgather(&tsizep, 1, MPI_LONG_LONG, &sizesp_[0], 1, MPI_LONG_LONG,
                      MPI_COMM_WORLD);
#endif
    }

    ~OrszagConvolver()
    {
        delete f1p_;
        delete f2p_;
        delete fbuf_;
#if defined(USE_MPI)
        delete[] crecvbuf_;
#endif
    }
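
    //! convolve the Hessian component d2l of inl with the Hessian component d2r of inr,
    //! i.e. the dealiased product of (-k_a k_b inl) and (-k_c k_d inr), and combine the
    //! result into res through the binary operator op(convolution_value, previous_value)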
    template <typename opp>
    void convolve_Hessians(Grid_FFT<data_t> &inl, const std::array<int, 2> &d2l,
                           Grid_FFT<data_t> &inr, const std::array<int, 2> &d2r,
                           Grid_FFT<data_t> &res, opp op)
    {
        // transform to Fourier space in case the fields are not there yet
        inl.FourierTransformForward();
        inr.FourierTransformForward();
        // perform convolution of the Hessians
        this->convolve2(
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inl.template get_k<real_t>(i, j, k);
                return -kk[d2l[0]] * kk[d2l[1]] * inl.kelem(i, j, k);
            },
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inr.template get_k<real_t>(i, j, k);
                return -kk[d2r[0]] * kk[d2r[1]] * inr.kelem(i, j, k);
            },
            res, op);
    }
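
    //! same as above, but for the dealiased triple product of three Hessian components
    //! (of inl, inm and inr); the result is combined into res through op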
    template <typename opp>
    void convolve_Hessians(Grid_FFT<data_t> &inl, const std::array<int, 2> &d2l,
                           Grid_FFT<data_t> &inm, const std::array<int, 2> &d2m,
                           Grid_FFT<data_t> &inr, const std::array<int, 2> &d2r,
                           Grid_FFT<data_t> &res, opp op)
    {
        // transform to Fourier space in case the fields are not there yet
        inl.FourierTransformForward();
        inm.FourierTransformForward();
        inr.FourierTransformForward();
        // perform convolution of the Hessians
        this->convolve3(
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inl.template get_k<real_t>(i, j, k);
                return -kk[d2l[0]] * kk[d2l[1]] * inl.kelem(i, j, k);
            },
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inm.template get_k<real_t>(i, j, k);
                return -kk[d2m[0]] * kk[d2m[1]] * inm.kelem(i, j, k);
            },
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inr.template get_k<real_t>(i, j, k);
                return -kk[d2r[0]] * kk[d2r[1]] * inr.kelem(i, j, k);
            },
            res, op);
    }
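
    //! convolve the Hessian component d2l of inl with the sum of the two Hessian
    //! components d2r1 and d2r2 of inr, combining the result into res through op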
    template <typename opp>
    void convolve_SumHessians(Grid_FFT<data_t> &inl, const std::array<int, 2> &d2l,
                              Grid_FFT<data_t> &inr, const std::array<int, 2> &d2r1,
                              const std::array<int, 2> &d2r2,
                              Grid_FFT<data_t> &res, opp op)
    {
        // transform to Fourier space in case the fields are not there yet
        inl.FourierTransformForward();
        inr.FourierTransformForward();
        // perform convolution of the Hessians
        this->convolve2(
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inl.template get_k<real_t>(i, j, k);
                return -kk[d2l[0]] * kk[d2l[1]] * inl.kelem(i, j, k);
            },
            [&](size_t i, size_t j, size_t k) -> ccomplex_t {
                auto kk = inr.template get_k<real_t>(i, j, k);
                return (-kk[d2r1[0]] * kk[d2r1[1]] - kk[d2r2[0]] * kk[d2r2[1]]) * inr.kelem(i, j, k);
            },
            res, op);
    }
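
    //! Core of the scheme: the dealiased product of two k-space fields.
    //! The two input spectra (given as k-dependent functors kf1, kf2) are copied onto
    //! grids enlarged by 3/2 along each axis (Orszag's rule), transformed to real space,
    //! multiplied point by point, transformed back, and the surviving modes are copied
    //! into res, combined with its previous content through op. Since the product of two
    //! fields with modes up to the Nyquist frequency contains modes up to twice that,
    //! the extra padding ensures that the aliased modes fall into the discarded region.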
    template <typename kfunc1, typename kfunc2, typename opp>
    void convolve2(kfunc1 kf1, kfunc2 kf2, Grid_FFT<data_t> &res, opp op)
    {
        //... prepare data 1
        f1p_->FourierTransformForward(false);
        this->pad_insert(kf1, *f1p_);

        //... prepare data 2
        f2p_->FourierTransformForward(false);
        this->pad_insert(kf2, *f2p_);

        //... convolve
        f1p_->FourierTransformBackward();
        f2p_->FourierTransformBackward();

#pragma omp parallel for
        for (size_t i = 0; i < f1p_->ntot_; ++i)
        {
            (*f2p_).relem(i) *= (*f1p_).relem(i);
        }
        f2p_->FourierTransformForward();

        //... copy data back
        res.FourierTransformForward();
        unpad(*f2p_, res, op);
    }
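
    //! dealiased product of three k-space fields: the first two factors are convolved
    //! into the buffer grid fbuf_ (overwriting it), and the intermediate result is then
    //! convolved with the third factor into res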
    template <typename kfunc1, typename kfunc2, typename kfunc3, typename opp>
    void convolve3(kfunc1 kf1, kfunc2 kf2, kfunc3 kf3, Grid_FFT<data_t> &res, opp op)
    {
        convolve2(kf1, kf2, *fbuf_, [](ccomplex_t res, ccomplex_t) { return res; });

        //... prepare data 1
        f1p_->FourierTransformForward(false);
        this->pad_insert([&](size_t i, size_t j, size_t k) { return fbuf_->kelem(i, j, k); }, *f1p_);

        //... prepare data 2
        f2p_->FourierTransformForward(false);
        this->pad_insert(kf3, *f2p_);

        //... convolve
        f1p_->FourierTransformBackward();
        f2p_->FourierTransformBackward();

#pragma omp parallel for
        for (size_t i = 0; i < f1p_->ntot_; ++i)
        {
            (*f2p_).relem(i) *= (*f1p_).relem(i);
        }
        f2p_->FourierTransformForward();

        //... copy data back
        res.FourierTransformForward();
        unpad(*f2p_, res, op);
    }
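
    //! diagnostic: pad a field onto the 3/2 grid and immediately unpad it again; up to
    //! the effect of op, res should reproduce the retained modes of the input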
    template <typename opp>
    void test_pad_unpad(Grid_FFT<data_t> &in, Grid_FFT<data_t> &res, opp op)
    {
        //... prepare data 1
        f1p_->FourierTransformForward(false);
        this->pad_insert([&](size_t i, size_t j, size_t k) { return in.kelem(i, j, k); }, *f1p_);
        f1p_->FourierTransformBackward();
        f1p_->FourierTransformForward();
        res.FourierTransformForward();
        unpad(*f1p_, res, op);
    }

    //... inplace interface
    /*void convolve3( const Grid_FFT<data_t> & f1, const Grid_FFT<data_t> & f2, const Grid_FFT<data_t> & f3, Grid_FFT<data_t> & res )
    {
        convolve2( f1, f2, res );
        convolve2( res, f3, res );
    }*/

private:
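
    //! Copy a k-space field onto the 3/2-padded grid fp.
    //! Along each axis, modes with index <= N/2 keep their index, while modes above the
    //! Nyquist index are shifted up by N/2 so that they occupy the top third of the padded
    //! axis; the middle third of the padded grid stays zero. For example (single axis,
    //! N = 8, padded size 12): index 6, i.e. wavenumber -2, maps to padded index 10, which
    //! is again wavenumber -2 on the size-12 grid. The factor rfac = (3/2)^(3/2)
    //! = sqrt(Np^3/N^3) matches the inverse factor applied in unpad and accounts for the
    //! different grid sizes. With MPI, each task additionally has to scatter its slabs of
    //! the unpadded grid to the tasks that own the corresponding slabs of the padded grid.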
    template <typename kdep_functor>
    void pad_insert(kdep_functor kfunc, Grid_FFT<data_t> &fp)
    {
        assert(fp.space_ == kspace_id);

        const double rfac = std::pow(1.5, 1.5); // = sqrt((3/2)^3), normalization between grid sizes

        fp.zero();

#if !defined(USE_MPI) ////////////////////////////////////////////////////////////////////////////////////
        size_t nhalf[3] = {fp.n_[0] / 3, fp.n_[1] / 3, fp.n_[2] / 3};

#pragma omp parallel for
        for (size_t i = 0; i < 2 * fp.size(0) / 3; ++i)
        {
            size_t ip = (i > nhalf[0]) ? i + nhalf[0] : i;
            for (size_t j = 0; j < 2 * fp.size(1) / 3; ++j)
            {
                size_t jp = (j > nhalf[1]) ? j + nhalf[1] : j;
                for (size_t k = 0; k < 2 * fp.size(2) / 3; ++k)
                {
                    size_t kp = (k > nhalf[2]) ? k + nhalf[2] : k;
                    // if( i==nhalf[0]||j==nhalf[1]||k==nhalf[2]) continue;
                    fp.kelem(ip, jp, kp) = kfunc(i, j, k) * rfac;
                }
            }
        }

#else /// then USE_MPI is defined ////////////////////////////////////////////////////////////

        MPI_Barrier(MPI_COMM_WORLD);

        fbuf_->FourierTransformForward(false);

        double tstart = get_wtime();
        csoca::dlog << "[MPI] Started scatter for convolution" << std::endl;

        //... collect offsets
        assert(fbuf_->space_ == kspace_id);

        size_t nf[3] = {fbuf_->size(0), fbuf_->size(1), fbuf_->size(2)};
        size_t nfp[3] = {fp.size(0), fp.size(1), fp.size(2)};

        //... local size must be divisible by 2, otherwise this gets too complicated
        assert(fbuf_->n_[1] % 2 == 0);

        size_t slicesz = fbuf_->size(1) * fbuf_->size(3);

        MPI_Datatype datatype =
            (typeid(data_t) == typeid(float)) ? MPI_COMPLEX :
            (typeid(data_t) == typeid(double)) ? MPI_DOUBLE_COMPLEX : MPI_BYTE;

        // fill MPI send buffer with results of kfunc
#pragma omp parallel for
        for (size_t i = 0; i < fbuf_->size(0); ++i)
        {
            for (size_t j = 0; j < fbuf_->size(1); ++j)
            {
                for (size_t k = 0; k < fbuf_->size(2); ++k)
                {
                    fbuf_->kelem(i, j, k) = kfunc(i, j, k) * rfac;
                }
            }
        }
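
        // Scatter phase: every task posts a non-blocking send of each of its local slabs
        // of the unpadded k-space buffer to the task that owns the corresponding slab of
        // the padded grid; slabs with iglobal < nf[0]/2 keep their global index as the
        // message tag, slabs with iglobal > nf[0]/2 are re-tagged with iglobal + nf[0]/2,
        // mirroring the serial index mapping above.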
        MPI_Status status;
        std::vector<MPI_Request> req;
        MPI_Request temp_req;

        // send data from buffer
        for (size_t i = 0; i < nf[0]; ++i)
        {
            size_t iglobal = i + offsets_[CONFIG::MPI_task_rank];

            if (iglobal < nf[0] / 2)
            {
                int sendto = get_task(iglobal, offsetsp_, sizesp_, CONFIG::MPI_task_size);
                MPI_Isend(&fbuf_->kelem(i * slicesz), (int)slicesz, datatype, sendto,
                          (int)iglobal, MPI_COMM_WORLD, &temp_req);
                req.push_back(temp_req);
            }
            if (iglobal > nf[0] / 2)
            {
                int sendto = get_task(iglobal + nf[0] / 2, offsetsp_, sizesp_, CONFIG::MPI_task_size);
                MPI_Isend(&fbuf_->kelem(i * slicesz), (int)slicesz, datatype, sendto,
                          (int)(iglobal + nf[0] / 2), MPI_COMM_WORLD, &temp_req);
                req.push_back(temp_req);
            }
        }

        for (size_t i = 0; i < nfp[0]; ++i)
        {
            size_t iglobal = i + offsetsp_[CONFIG::MPI_task_rank];

            if (iglobal < nf[0] / 2 || iglobal > nf[0])
            {
                int recvfrom = 0;
                if (iglobal < nf[0] / 2)
                    recvfrom = get_task(iglobal, offsets_, sizes_, CONFIG::MPI_task_size);
                else
                    recvfrom = get_task(iglobal - nf[0] / 2, offsets_, sizes_, CONFIG::MPI_task_size);

                MPI_Recv(&recvbuf_[0], (int)slicesz, datatype, recvfrom, (int)iglobal,
                         MPI_COMM_WORLD, &status);
                assert(status.MPI_ERROR == MPI_SUCCESS);

                for (size_t j = 0; j < nf[1]; ++j)
                {
                    if (j < nf[1] / 2)
                    {
                        size_t jp = j;
                        for (size_t k = 0; k < nf[2]; ++k)
                        {
                            if (k < nf[2] / 2)
                                fp.kelem(i, jp, k) = crecvbuf_[j * fbuf_->sizes_[3] + k];
                            else if (k > nf[2] / 2)
                                fp.kelem(i, jp, k + nf[2] / 2) = crecvbuf_[j * fbuf_->sizes_[3] + k];
                        }
                    }
                    else if (j > nf[1] / 2)
                    {
                        size_t jp = j + nf[1] / 2;
                        for (size_t k = 0; k < nf[2]; ++k)
                        {
                            if (k < nf[2] / 2)
                                fp.kelem(i, jp, k) = crecvbuf_[j * fbuf_->sizes_[3] + k];
                            else if (k > nf[2] / 2)
                                fp.kelem(i, jp, k + nf[2] / 2) = crecvbuf_[j * fbuf_->sizes_[3] + k];
                        }
                    }
                }
            }
        }

        for (size_t i = 0; i < req.size(); ++i)
        {
            // need to preset status, as MPI_Wait does not necessarily modify it
            // c.f. http://www.open-mpi.org/community/lists/devel/2007/04/1402.php
            status.MPI_ERROR = MPI_SUCCESS;
            MPI_Wait(&req[i], &status);
            assert(status.MPI_ERROR == MPI_SUCCESS);
        }

        MPI_Barrier(MPI_COMM_WORLD);

        csoca::dlog.Print("[MPI] Completed scatter for convolution, took %fs\n",
                          get_wtime() - tstart);

#endif /// end of ifdef/ifndef USE_MPI ///////////////////////////////////////////////////////////////
    }
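
    //! Copy the retained modes of a padded k-space field fp back onto the unpadded grid f.
    //! This is the inverse of the mapping in pad_insert: modes with index <= N/2 are read
    //! from the same index of the padded grid, modes above the Nyquist index are read from
    //! the index shifted up by N/2. Each mode is divided by rfac = sqrt(Np^3/N^3) to undo
    //! the padding normalization, and combined with the existing content of f through
    //! op(new_value, previous_value), so callers can assign, accumulate or subtract.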
    template <typename operator_t>
    void unpad(const Grid_FFT<data_t> &fp, Grid_FFT<data_t> &f, operator_t op)
    {
        const double rfac = std::sqrt(fp.n_[0] * fp.n_[1] * fp.n_[2]) / std::sqrt(f.n_[0] * f.n_[1] * f.n_[2]);

        // make sure we're in Fourier space...
        assert(fp.space_ == kspace_id);
        f.FourierTransformForward();

#if !defined(USE_MPI) ////////////////////////////////////////////////////////////////////////////////////
        size_t nhalf[3] = {f.n_[0] / 2, f.n_[1] / 2, f.n_[2] / 2};

        for (size_t i = 0; i < f.size(0); ++i)
        {
            size_t ip = (i > nhalf[0]) ? i + nhalf[0] : i;
            for (size_t j = 0; j < f.size(1); ++j)
            {
                size_t jp = (j > nhalf[1]) ? j + nhalf[1] : j;
                for (size_t k = 0; k < f.size(2); ++k)
                {
                    size_t kp = (k > nhalf[2]) ? k + nhalf[2] : k;
                    // if( i==nhalf[0]||j==nhalf[1]||k==nhalf[2]) continue;
                    f.kelem(i, j, k) = op(fp.kelem(ip, jp, kp) / rfac, f.kelem(i, j, k));
                }
            }
        }

#else /// then USE_MPI is defined //////////////////////////////////////////////////////////////

        double tstart = get_wtime();
        csoca::dlog << "[MPI] Started gather for convolution";

        MPI_Barrier(MPI_COMM_WORLD);

        size_t nf[3] = {f.size(0), f.size(1), f.size(2)};
        size_t nfp[4] = {fp.size(0), fp.size(1), fp.size(2), fp.size(3)};
        size_t fny[3] = {f.n_[1] / 2, f.n_[0] / 2, f.n_[2] / 2};

        size_t slicesz = fp.size(1) * fp.size(3);

        MPI_Datatype datatype =
            (typeid(data_t) == typeid(float)) ? MPI_COMPLEX :
            (typeid(data_t) == typeid(double)) ? MPI_DOUBLE_COMPLEX : MPI_BYTE;

        MPI_Status status;

        //... local size must be divisible by 2, otherwise this gets too complicated
        // assert( tsize%2 == 0 );

        std::vector<MPI_Request> req;
        MPI_Request temp_req;

        //... gather phase: send local slabs of the padded grid back to the owners of the
        //... corresponding slabs of the unpadded grid
        for (size_t i = 0; i < nfp[0]; ++i)
        {
            size_t iglobal = i + offsetsp_[CONFIG::MPI_task_rank];

            //... sending
            if (iglobal < fny[0])
            {
                int sendto = get_task(iglobal, offsets_, sizes_, CONFIG::MPI_task_size);
                MPI_Isend(&fp.kelem(i * slicesz), (int)slicesz, datatype, sendto, (int)iglobal,
                          MPI_COMM_WORLD, &temp_req);
                req.push_back(temp_req);
            }
            else if (iglobal > 2 * fny[0])
            {
                int sendto = get_task(iglobal - fny[0], offsets_, sizes_, CONFIG::MPI_task_size);
                MPI_Isend(&fp.kelem(i * slicesz), (int)slicesz, datatype, sendto, (int)iglobal,
                          MPI_COMM_WORLD, &temp_req);
                req.push_back(temp_req);
            }
        }
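
        // Receive side: for each of its local slabs of the unpadded grid, a task receives
        // the matching padded-grid slab (tag iglobal, or iglobal + fny[0] above the
        // Nyquist plane), then combines the retained y/z modes into f through op.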
        for (size_t i = 0; i < nf[0]; ++i)
        {
            size_t iglobal = i + offsets_[CONFIG::MPI_task_rank];

            status.MPI_ERROR = MPI_SUCCESS;

            int recvfrom = 0;
            if (iglobal < fny[0])
            {
                recvfrom = get_task(iglobal, offsetsp_, sizesp_, CONFIG::MPI_task_size);
                MPI_Recv(&recvbuf_[0], (int)slicesz, datatype, recvfrom, (int)iglobal,
                         MPI_COMM_WORLD, &status);
            }
            else if (iglobal > fny[0])
            {
                recvfrom = get_task(iglobal + fny[0], offsetsp_, sizesp_, CONFIG::MPI_task_size);
                MPI_Recv(&recvbuf_[0], (int)slicesz, datatype, recvfrom,
                         (int)(iglobal + fny[0]), MPI_COMM_WORLD, &status);
            }
            else
                continue;

            assert(status.MPI_ERROR == MPI_SUCCESS);

            for (size_t j = 0; j < nf[1]; ++j)
            {
                if (j < fny[1])
                {
                    size_t jp = j;
                    for (size_t k = 0; k < nf[2]; ++k)
                    {
                        if (k < fny[2])
                            f.kelem(i, j, k) = op(crecvbuf_[jp * nfp[3] + k] / rfac, f.kelem(i, j, k));
                        else if (k > fny[2])
                            f.kelem(i, j, k) = op(crecvbuf_[jp * nfp[3] + k + fny[2]] / rfac, f.kelem(i, j, k));
                    }
                }
                if (j > fny[1])
                {
                    size_t jp = j + fny[1];
                    for (size_t k = 0; k < nf[2]; ++k)
                    {
                        if (k < fny[2])
                            f.kelem(i, j, k) = op(crecvbuf_[jp * nfp[3] + k] / rfac, f.kelem(i, j, k));
                        else if (k > fny[2])
                            f.kelem(i, j, k) = op(crecvbuf_[jp * nfp[3] + k + fny[2]] / rfac, f.kelem(i, j, k));
                    }
                }
            }
        }

        for (size_t i = 0; i < req.size(); ++i)
        {
            // need to preset status, as MPI_Wait does not necessarily modify it to reflect
            // success, c.f. http://www.open-mpi.org/community/lists/devel/2007/04/1402.php
            status.MPI_ERROR = MPI_SUCCESS;
            MPI_Wait(&req[i], &status);
            assert(status.MPI_ERROR == MPI_SUCCESS);
        }

        MPI_Barrier(MPI_COMM_WORLD);

        csoca::dlog.Print("[MPI] Completed gather for convolution, took %fs", get_wtime() - tstart);

#endif /// end of ifdef/ifndef USE_MPI //////////////////////////////////////////////////////////////
    }
};