getfem_assembling_tensors.h
/* -*- c++ -*- (enables emacs c++ mode) */
/*===========================================================================

 Copyright (C) 2003-2017 Julien Pommier

 This file is a part of GetFEM++

 GetFEM++ is free software; you can redistribute it and/or modify it
 under the terms of the GNU Lesser General Public License as published
 by the Free Software Foundation; either version 3 of the License, or
 (at your option) any later version along with the GCC Runtime Library
 Exception either version 3.1 or (at your option) any later version.
 This program is distributed in the hope that it will be useful, but
 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 License and GCC Runtime Library Exception for more details.
 You should have received a copy of the GNU Lesser General Public License
 along with this program; if not, write to the Free Software Foundation,
 Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.

 As a special exception, you may use this file as it is a part of a free
 software library without restriction. Specifically, if other files
 instantiate templates or use macros or inline functions from this file,
 or you compile this file and link it with other files to produce an
 executable, this file does not by itself cause the resulting executable
 to be covered by the GNU Lesser General Public License. This exception
 does not however invalidate any other reasons why the executable file
 might be covered by the GNU Lesser General Public License.

===========================================================================*/

/**@file getfem_assembling_tensors.h
   @author Julien Pommier <Julien.Pommier@insa-toulouse.fr>
   @date January 2003.
   @brief Generic assembly implementation.
*/
#ifndef GETFEM_ASSEMBLING_TENSORS_H__
#define GETFEM_ASSEMBLING_TENSORS_H__

#include "gmm/gmm_kernel.h"
#include "getfem_mesh_fem.h"
#include "getfem_mesh_im.h"
#include "bgeot_sparse_tensors.h"
#include "getfem_mat_elem_type.h"
#include "getfem_mat_elem.h"
#include <map>

#define ASM_THROW_PARSE_ERROR(x) \
  GMM_ASSERT1(false, "parse error: " << x << endl << "found here:\n " \
              << syntax_err_print());
#define ASM_THROW_TENSOR_ERROR(x) \
  GMM_ASSERT1(false, "tensor error: " << x);
#define ASM_THROW_ERROR(x) GMM_ASSERT1(false, "error: " << x);

namespace getfem {
  using bgeot::stride_type;
  using bgeot::index_type;
  using bgeot::index_set;
  using bgeot::tensor_ranges;
  using bgeot::tensor_strides;
  using bgeot::tensor_mask;
  using bgeot::tensor_shape;
  using bgeot::tensor_ref;
  using bgeot::multi_tensor_iterator;
  using bgeot::TDIter;

  class ATN_tensor;

  /*
     Base class for the tree built from the expression of the tensor
     assembly (ATN == Assembly Tree Node).
  */
  class ATN {
    std::deque< ATN_tensor* > childs_;
    std::string name_; /* the name is a part of the parsed string */
    unsigned number_;  /* a unique number, used for the ordering of the tree */
  protected:
    size_type current_cv;
    dim_type current_face;
  public:
    ATN(const std::string& n=std::string("unnamed")) :
      name_(n), number_(unsigned(-1)), current_cv(size_type(-1)),
      current_face(dim_type(-1)) {}
    virtual ~ATN() {}

    void add_child(ATN_tensor& a) { childs_.push_back(&a); }
    ATN_tensor& child(size_type n) { return *childs_[n]; }
    size_type nchilds() { return childs_.size(); }
    /* reinit is called each time the object needs to reset itself
       (when the shape of one of its children has changed) */
    void reinit() { if (!is_zero_size()) reinit_(); }
    /* do the computations for a given convex */
    void exec(size_type cv, dim_type face) {
      if (cv != current_cv || face != current_face) {
        if (!is_zero_size())
          exec_(cv, face);
        current_cv = cv;
        current_face = face;
      }
    }
    const std::string& name() { return name_; }
    void set_name(const std::string& n) { name_ = n; }
    /* the "root" nodes expect to get all tensor values;
       other nodes have a more specific behavior */
    virtual void update_childs_required_shape();

    virtual bool is_zero_size();

    /* numbering of tensors, such that if i < j then tensor(j)
       cannot be in the sub-tree of tensor(i) */
    void set_number(unsigned &gcnt);
    unsigned number() const { return number_; }
  private:
    virtual void reinit_() = 0;
    virtual void exec_(size_type, dim_type) {}
  };

  class ATN_tensors_sum_scaled;

  /* Base class for every node except the "final" ones */
  class ATN_tensor : public ATN {
  protected:
    tensor_ranges r_;
    bool shape_updated_;
    tensor_ref tr;
    tensor_shape req_shape;
    bool frozen_; /* used to recognize intermediate results of
                     computations stored in a temporary variable: they
                     cannot be modified a posteriori (as could happen
                     with an ATN_tensors_sum_scaled) */
  public:
    ATN_tensor() { shape_updated_ = false; frozen_ = false; }
    bool is_shape_updated() const { return shape_updated_; }
    void freeze() { frozen_ = true; }
    bool is_frozen() const { return frozen_; }
    const tensor_ranges& ranges() const { return r_; }
    const tensor_shape& required_shape() const { return req_shape; }
    /* check_shape_update is called for each node of the tree;
       if the shape of the tensor has been modified, the flag
       shape_updated_ should be set, and r_ should contain the
       new dimensions. This function is called in such an order
       that the shape updates are automatically propagated in the tree */
    virtual void check_shape_update(size_type, dim_type) {}
    /* if the shape was updated, the node should initialise its req_shape */
    virtual void init_required_shape() { req_shape.set_empty(r_); }
    /* then each node updates the req_shape of its children */
    virtual void update_childs_required_shape() {
      for (dim_type i=0; i < nchilds(); ++i) {
        child(i).merge_required_shape(req_shape);
      }
    }
    /* ... then reserve some memory if necessary for tensor storage
       in 'reinit' (inherited here from ATN) */
    tensor_ref& tensor() {
      return tr;
    }

    bool is_zero_size() { return r_.is_zero_size(); }

    void merge_required_shape(const tensor_shape& shape_from_parent) {
      req_shape.merge(shape_from_parent, false);
    }
    /* recognize sums of scaled tensors
       (in order to stack those sums on the same object)
       (dynamic_cast prohibited for the moment: crashes matlab) */
    virtual ATN_tensors_sum_scaled* is_tensors_sum_scaled() { return 0; }
  };


  /* simple list of "virtual" dimensions, i.e. dimensions which may be
     constant or given by a mesh_fem */
  struct vdim_specif {
    size_type dim;
    const mesh_fem *pmf;
    bool is_mf_ref() const { return (pmf != 0); }
    vdim_specif() { dim = size_type(-1); pmf = 0; }
    vdim_specif(size_type i) { dim = i; pmf = 0; }
    vdim_specif(const mesh_fem *pmf_) { dim = pmf_->nb_dof(); pmf = pmf_; }
  };
  class vdim_specif_list : public std::vector< vdim_specif > {
  public:
    vdim_specif_list() { reserve(8); }
    size_type nb_mf() const;
    size_type nbelt() const;
    void build_strides_for_cv(size_type cv, tensor_ranges& r,
                              std::vector<tensor_strides>& str) const;
  };
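
  /* Illustrative sketch (not part of the original header): building the
     virtual dimensions of a 3-component field on a mesh_fem `mf` (the
     variable name is hypothetical):

       vdim_specif_list vl;
       vl.push_back(vdim_specif(&mf));           // dimension indexed by the dofs of mf
       vl.push_back(vdim_specif(size_type(3)));  // constant dimension

     build_strides_for_cv then computes, for each convex, the ranges and
     the fortran-order strides used by the output nodes below. */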

  /* final node for array output: array means full array of 0,1,2,3 or
     more dimensions, stored in a vector VEC in fortran order */
  template< typename VEC > class ATN_array_output : public ATN {
    VEC& v;
    vdim_specif_list vdim;
    multi_tensor_iterator mti;
    tensor_strides strides;
    const mesh_fem *pmf;
  public:
    ATN_array_output(ATN_tensor& a, VEC& v_, vdim_specif_list &d)
      : v(v_), vdim(d) {
      strides.resize(vdim.size()+1);
      add_child(a);
      strides[0] = 1;
      pmf = 0;
      for (size_type i=0; i < vdim.size(); ++i) {
        if (vdim[i].pmf) pmf = vdim[i].pmf;
        strides[i+1] = strides[i]*int(vdim[i].dim);
      }
      if (gmm::vect_size(v) != size_type(strides[vdim.size()]))
        ASM_THROW_TENSOR_ERROR("wrong size for output vector: supplied "
                               "vector size is " << gmm::vect_size(v)
                               << " while it should be "
                               << strides[vdim.size()]);
    }
  private:
    void reinit_() {
      mti = multi_tensor_iterator(child(0).tensor(), true);
    }

    void exec_(size_type cv, dim_type) {
      tensor_ranges r;
      std::vector< tensor_strides > str;
      vdim.build_strides_for_cv(cv, r, str);
      if (child(0).ranges() != r) {
        ASM_THROW_TENSOR_ERROR("can't output a tensor of dimensions "
                               << child(0).ranges() <<
                               " into an output array of size " << r);
      }
      mti.rewind();
      if (pmf && pmf->is_reduced()) {
        if (pmf->nb_dof() != 0) {
          do {
            size_type nb_dof = pmf->nb_dof();
            dim_type qqdim = dim_type(gmm::vect_size(v) / nb_dof);

            if (qqdim == 1) {
              size_type i = 0;
              for (dim_type j=0; j < mti.ndim(); ++j) i += str[j][mti.index(j)];
              gmm::add(gmm::scaled(gmm::mat_row(pmf->extension_matrix(), i),
                                   mti.p(0)), v);
            }
            else {
              GMM_ASSERT1(false, "To be verified ... ");
              size_type i = 0;
              for (dim_type j=0; j < mti.ndim(); ++j) i += str[j][mti.index(j)];
              gmm::add(gmm::scaled(gmm::mat_row(pmf->extension_matrix(), i/qqdim),
                                   mti.p(0)),
                       gmm::sub_vector(v, gmm::sub_slice(i%qqdim, nb_dof, qqdim)));
            }
          } while (mti.qnext1());
        }
      }
      else {
        do {
          typename gmm::linalg_traits<VEC>::iterator it = gmm::vect_begin(v);
          for (dim_type j = 0; j < mti.ndim(); ++j) it += str[j][mti.index(j)];
          *it += mti.p(0);
        } while (mti.qnext1());
      }
    }
  };

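  /* The three overloads below accumulate the rank-one update
     M += r * row * col^T over the stored (nonzero) entries of their sparse
     arguments; they are used to route elementary contributions through the
     extension matrix of reduced mesh_fems. A minimal sketch of what one
     call computes (hypothetical gmm containers, shown for illustration):

       gmm::row_matrix<gmm::wsvector<scalar_type>> M(3, 3);
       gmm::wsvector<scalar_type> row(3), col(3);
       row[0] = 1.0; col[2] = 2.0;
       asmrankoneupdate(M, row, col, 0.5);  // only M(0,2) changes: += 1*2*0.5
  */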
  template <typename MAT, typename ROW, typename COL>
  void asmrankoneupdate(const MAT &m_, const ROW &row, const COL &col,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<ROW>::const_iterator itr = row.begin();
    for (; itr != row.end(); ++itr) {
      typename gmm::linalg_traits<COL>::const_iterator itc = col.begin();
      for (; itc != col.end(); ++itc)
        m(itr.index(), itc.index()) += (*itr) * (*itc) * r;
    }
  }

  template <typename MAT, typename ROW>
  void asmrankoneupdate(const MAT &m_, const ROW &row, size_type j,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<ROW>::const_iterator itr = row.begin();
    for (; itr != row.end(); ++itr) m(itr.index(), j) += (*itr) * r;
  }

  template <typename MAT, typename COL>
  void asmrankoneupdate(const MAT &m_, size_type j, const COL &col,
                        scalar_type r) {
    MAT &m = const_cast<MAT &>(m_);
    typename gmm::linalg_traits<COL>::const_iterator itc = col.begin();
    for (; itc != col.end(); ++itc) m(j, itc.index()) += (*itc) * r;
  }

  /* final node for sparse matrix output */
  template< typename MAT > class ATN_smatrix_output : public ATN {
    const mesh_fem &mf_r, &mf_c;
    MAT& m;
    multi_tensor_iterator mti;
    struct ijv { // just a fast cache for the mti output
                 // (yes, it makes a small difference)
      scalar_type *p;
      unsigned i, j;
    };
    std::vector<ijv> it;
  public:
    ATN_smatrix_output(ATN_tensor& a, const mesh_fem& mf_r_,
                       const mesh_fem& mf_c_, MAT& m_)
      : mf_r(mf_r_), mf_c(mf_c_), m(m_) {
      add_child(a);
      it.reserve(100);
    }
  private:
    void reinit_() {
      mti = multi_tensor_iterator(child(0).tensor(), true);
      it.resize(0);
    }
    void exec_(size_type cv, dim_type) {
      size_type nb_r = mf_r.nb_basic_dof_of_element(cv);
      size_type nb_c = mf_c.nb_basic_dof_of_element(cv);
      if (child(0).tensor().ndim() != 2)
        ASM_THROW_TENSOR_ERROR("cannot write a " <<
                               int(child(0).tensor().ndim()) <<
                               "D-tensor into a matrix!");
      if (child(0).tensor().dim(0) != nb_r ||
          child(0).tensor().dim(1) != nb_c) {
        ASM_THROW_TENSOR_ERROR("size mismatch for sparse matrix output:"
                               " tensor dimension is " << child(0).ranges()
                               << ", while the elementary matrix for convex "
                               << cv << " should have " << nb_r << "x"
                               << nb_c << " elements");
      }
      std::vector<size_type> cvdof_r(mf_r.ind_basic_dof_of_element(cv).begin(),
                                     mf_r.ind_basic_dof_of_element(cv).end());
      std::vector<size_type> cvdof_c(mf_c.ind_basic_dof_of_element(cv).begin(),
                                     mf_c.ind_basic_dof_of_element(cv).end());

      if (it.size() == 0) {
        mti.rewind();
        do {
          ijv v;
          v.p = &mti.p(0);
          v.i = mti.index(0);
          v.j = mti.index(1);
          it.push_back(v);
        } while (mti.qnext1());
      }

      bool valid_mf_r = mf_r.nb_dof() > 0;
      bool valid_mf_c = mf_c.nb_dof() > 0;

      if (mf_r.is_reduced()) {
        if (mf_c.is_reduced() && valid_mf_r && valid_mf_c) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, gmm::mat_row(mf_r.extension_matrix(),
                                               cvdof_r[it[i].i]),
                               gmm::mat_row(mf_c.extension_matrix(),
                                            cvdof_c[it[i].j]),
                               *it[i].p);
        }
        else if (valid_mf_r) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, gmm::mat_row(mf_r.extension_matrix(),
                                               cvdof_r[it[i].i]),
                               cvdof_c[it[i].j], *it[i].p);
        }
      }
      else {
        if (mf_c.is_reduced() && valid_mf_c) {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              asmrankoneupdate(m, cvdof_r[it[i].i],
                               gmm::mat_row(mf_c.extension_matrix(),
                                            cvdof_c[it[i].j]),
                               *it[i].p);
        }
        else {
          for (unsigned i = 0; i < it.size(); ++i)
            if (*it[i].p)
              m(cvdof_r[it[i].i], cvdof_c[it[i].j]) += *it[i].p;
        }
      }
    }
  };


  /* Some wrappers: their aim is to provide better genericity and to
     avoid templatizing the whole 'generic_assembly' class, which is
     quite(!) big */
  class base_asm_data {
  public:
    virtual size_type vect_size() const = 0;
    virtual void copy_with_mti(const std::vector<tensor_strides> &,
                               multi_tensor_iterator &,
                               const mesh_fem *) const = 0;
    virtual ~base_asm_data() {}
  };

  template< typename VEC > class asm_data : public base_asm_data {
    const VEC &v;
  public:
    asm_data(const VEC *v_) : v(*v_) {}
    size_type vect_size() const {
      return gmm::vect_size(v);
    }
    /* used to transfer the data for the current convex to the mti of
       ATN_tensor_from_dofs_data */
    void copy_with_mti(const std::vector<tensor_strides> &str,
                       multi_tensor_iterator &mti, const mesh_fem *pmf) const {
      size_type ppos;
      if (pmf && pmf->is_reduced()) {
        do {
          ppos = 0;
          for (dim_type i = 0; i < mti.ndim(); ++i) ppos += str[i][mti.index(i)];
          mti.p(0)
            = gmm::vect_sp(gmm::mat_row(pmf->extension_matrix(), ppos), v);
        } while (mti.qnext1());
      }
      else {
        do {
          ppos = 0;
          for (dim_type i = 0; i < mti.ndim(); ++i) ppos += str[i][mti.index(i)];
          mti.p(0) = v[ppos];
        } while (mti.qnext1());
      }
    }
  };

  class base_asm_vec {
  public:
    virtual std::unique_ptr<ATN> build_output_tensor(ATN_tensor &a,
                                                     vdim_specif_list& vdim) = 0;
    virtual ~base_asm_vec() {}
  };

  template< typename VEC > class asm_vec : public base_asm_vec {
    std::shared_ptr<VEC> v;
  public:
    asm_vec(const std::shared_ptr<VEC> &v_) : v(v_) {}
    asm_vec(VEC *v_) : v(std::shared_ptr<VEC>(), v_) {}
    virtual std::unique_ptr<ATN> build_output_tensor(ATN_tensor &a,
                                                     vdim_specif_list& vdim) {
      return std::make_unique<ATN_array_output<VEC>>(a, *v, vdim);
    }
    VEC *vec() { return v.get(); }
  };
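
  /* Ownership note (illustrative, with a hypothetical vector type): asm_vec
     either shares ownership of the output vector or aliases a caller-owned
     one through a non-owning shared_ptr:

       auto owned = std::make_shared<std::vector<scalar_type>>(10);
       asm_vec<std::vector<scalar_type>> a(owned);  // shared ownership
       std::vector<scalar_type> user(10);
       asm_vec<std::vector<scalar_type>> b(&user);  // non-owning alias
  */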

  /* the "factory" is only useful for the matlab interface,
     since the number of output arrays and sparse matrices is unknown
     for user-supplied assemblies. Hence they are created "on-the-fly" */
  class base_vec_factory {
  public:
    virtual base_asm_vec* create_vec(const tensor_ranges& r) = 0;
    virtual ~base_vec_factory() {}
  };

  template< typename VEC > class vec_factory
    : public base_vec_factory, private std::deque<asm_vec<VEC> > {
  public:
    base_asm_vec* create_vec(const tensor_ranges& r) {
      size_type sz = 1; for (size_type i=0; i < r.size(); ++i) sz *= r[i];
      if (sz == 0)
        ASM_THROW_TENSOR_ERROR("can't create a vector of size " << r);
      this->push_back(asm_vec<VEC>(std::make_shared<VEC>(sz)));
      return &this->back();
    }
  };


  /* matrix wrappers */
  class base_asm_mat {
  public:
    virtual std::unique_ptr<ATN>
    build_output_tensor(ATN_tensor& a, const mesh_fem& mf1,
                        const mesh_fem& mf2) = 0;
    virtual ~base_asm_mat() {}
  };

  template< typename MAT > class asm_mat : public base_asm_mat {
    std::shared_ptr<MAT> m;
  public:
    asm_mat(const std::shared_ptr<MAT> &m_) : m(m_) {}
    asm_mat(MAT *m_) : m(std::shared_ptr<MAT>(), m_) {}
    std::unique_ptr<ATN>
    build_output_tensor(ATN_tensor& a, const mesh_fem& mf1,
                        const mesh_fem& mf2) {
      return std::make_unique<ATN_smatrix_output<MAT>>(a, mf1, mf2, *m);
    }
    MAT *mat() { return m.get(); }
    ~asm_mat() {}
  };

  class base_mat_factory {
  public:
    virtual base_asm_mat* create_mat(size_type m, size_type n) = 0;
    virtual ~base_mat_factory() {}
  };

  template< typename MAT > class mat_factory
    : public base_mat_factory, private std::deque<asm_mat<MAT> > {
  public:
    base_asm_mat* create_mat(size_type m, size_type n) {
      this->push_back(asm_mat<MAT>(std::make_shared<MAT>(m, n)));
      return &this->back();
    }
  };


  class tnode {
  public:
    typedef enum { TNCONST, TNTENSOR, TNNONE } node_type;
  private:
    node_type type_;
    scalar_type x;
    ATN_tensor *t;
  public:
    tnode() : type_(TNNONE), x(1e300), t(NULL) {}
    tnode(scalar_type x_) { assign(x_); }
    tnode(ATN_tensor *t_) { assign(t_); }
    void assign(scalar_type x_) { type_ = TNCONST; t = NULL; x = x_; }
    void assign(ATN_tensor *t_) { type_ = TNTENSOR; t = t_; x = 1e300; }
    ATN_tensor* tensor() { assert(type_ == TNTENSOR); return t; }
    scalar_type xval() { assert(type_ == TNCONST); return x; }
    node_type type() { return type_; }
    void check0() { if (xval() == 0) ASM_THROW_ERROR("division by zero"); }
  };

  class asm_tokenizer {
  public:
    typedef enum { OPEN_PAR='(', CLOSE_PAR=')', COMMA=',',
                   SEMICOLON=';', COLON=':', EQUAL='=', MFREF='#', IMREF='%',
                   PLUS='+', MINUS='-', PRODUCT='.', MULTIPLY='*',
                   DIVIDE='/', ARGNUM_SELECTOR='$',
                   OPEN_BRACE='{', CLOSE_BRACE='}',
                   END=0, IDENT=1, NUMBER=2 } tok_type_enum;
  private:
    std::string str;
    size_type tok_pos, tok_len;
    tok_type_enum curr_tok_type;
    std::string curr_tok;
    int curr_tok_ival;
    double curr_tok_dval;
    size_type err_msg_mark;
    std::deque<size_type> marks;
  public:
    asm_tokenizer() {}
    void set_str(const std::string& s_) {
      str = s_; tok_pos = 0; tok_len = size_type(-1); curr_tok_type = END;
      err_msg_mark = 0; get_tok();
    }
    std::string tok() const { return curr_tok; }
    tok_type_enum tok_type() const { return curr_tok_type; }
    size_type tok_mark() { return tok_pos; }
    std::string tok_substr(size_type i1, size_type i2)
    { return str.substr(i1, i2-i1); }
    void err_set_mark() {
      err_msg_mark = tok_pos;
    }
    void push_mark() { marks.push_back(tok_pos); }
    void pop_mark() { assert(marks.size()); marks.pop_back(); }
    std::string mark_txt() {
      assert(marks.size());
      return tok_substr(marks.back(), tok_pos);
    }

    /* returns a friendly message indicating the location of the syntax error */
    std::string syntax_err_print();
    void accept(tok_type_enum t, const char *msg_="syntax error") {
      if (tok_type() != t) ASM_THROW_PARSE_ERROR(msg_); advance();
    }
    void accept(tok_type_enum t, tok_type_enum t2,
                const char *msg_="syntax error") {
      if (tok_type() != t && tok_type() != t2)
        ASM_THROW_PARSE_ERROR(msg_);
      advance();
    }
    bool advance_if(tok_type_enum t) {
      if (tok_type() == t) { advance(); return true; } else return false;
    }
    void advance() { tok_pos += tok_len; get_tok(); }
    void get_tok();
    double tok_number_dval()
    { assert(tok_type()==NUMBER); return curr_tok_dval; }
    int tok_number_ival(int maxval=10000000) {
      int n = int(tok_number_dval());
      if (n != curr_tok_dval) ASM_THROW_PARSE_ERROR("not an integer");
      if (n > maxval) ASM_THROW_PARSE_ERROR("out of bound integer");
      return n-1; /* -1 because the indexing starts at 1! */
    }
    size_type tok_mfref_num()
    { assert(tok_type()==MFREF); return curr_tok_ival; }
    size_type tok_imref_num()
    { assert(tok_type()==IMREF); return curr_tok_ival; }
    size_type tok_argnum()
    { assert(tok_type()==ARGNUM_SELECTOR); return curr_tok_ival; }
  };
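
  /* Illustrative sketch (the exact token stream is an assumption, not part
     of the original header): tokenizing a small assembly string

       asm_tokenizer tok;
       tok.set_str("M(#1,#1)+=comp(Base(#1).Base(#1))");
       // expected stream: IDENT "M", OPEN_PAR, MFREF (1), COMMA, MFREF (1),
       // CLOSE_PAR, PLUS, EQUAL, IDENT "comp", OPEN_PAR, IDENT "Base", ...
       while (tok.tok_type() != asm_tokenizer::END) tok.advance();
  */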


  /** Generic assembly of vectors, matrices.

      Many examples of use available @link asm here@endlink.
  */
  class generic_assembly : public asm_tokenizer {
    std::vector<const mesh_fem *> mftab;        /* list of the mesh_fem used */
    std::vector<const mesh_im *> imtab;         /* list of the mesh_im used  */
    std::vector<pnonlinear_elem_term> innonlin; /* alternatives to base,
                                                   grad, hess in comp() for
                                                   non-linear computations */
    std::vector<std::unique_ptr<base_asm_data>> indata; /* data sources */
    std::vector<std::shared_ptr<base_asm_vec>> outvec;  /* vectors in which
                                                   the assembly is done */
    std::vector<std::shared_ptr<base_asm_mat>> outmat;  /* matrices in which
                                                   the assembly is done */

    base_vec_factory *vec_fact; /* if non null, used to fill the outvec
                                   list with a given vector class */
    base_mat_factory *mat_fact; /* if non null, used to fill the outmat
                                   list with a given matrix class */

    std::vector<std::unique_ptr<ATN>> outvars; /* the list of "final tensors"
                                   which produce some output in outvec
                                   and outmat */

    std::map<std::string, ATN_tensor *> vars; /* the list of user variables */
    std::vector<std::unique_ptr<ATN_tensor>> atn_tensors; /* keep track of
       all tensor objects (except the ones listed in 'outvars') for
       deallocation when all is done. Note that they are not stored in a
       random order, but are reordered such that the children of the
       i-th ATN_tensor are all stored at indices j < i. This assumption is
       largely used for calls to shape updates and exec(cv,f). */
    bool parse_done;

  public:
    generic_assembly() : vec_fact(0), mat_fact(0), parse_done(false) {}
    generic_assembly(const std::string& s_) :
      vec_fact(0), mat_fact(0), parse_done(false)
    { set_str(s_); }
    ~generic_assembly() {}

    void set(const std::string& s_) { set_str(s_); }
    const std::vector<const mesh_fem*>& mf() const { return mftab; }
    const std::vector<const mesh_im*>& im() const { return imtab; }
    const std::vector<pnonlinear_elem_term> nonlin() const { return innonlin; }
    const std::vector<std::unique_ptr<base_asm_data>>& data() const { return indata; }
    const std::vector<std::shared_ptr<base_asm_vec>>& vec() const { return outvec; }
    const std::vector<std::shared_ptr<base_asm_mat>>& mat() const { return outmat; }
    /// Add a new mesh_fem
    void push_mf(const mesh_fem& mf_) { mftab.push_back(&mf_); }
    /// Add a new mesh_im
    void push_mi(const mesh_im& im_) { imtab.push_back(&im_); }
    void push_im(const mesh_im& im_) { imtab.push_back(&im_); }
    /// Add a new non-linear term
    void push_nonlinear_term(pnonlinear_elem_term net) {
      innonlin.push_back(net);
    }
    /// Add a new data (dense array)
    template< typename VEC > void push_data(const VEC& d) {
      indata.push_back(std::make_unique<asm_data<VEC>>(&d));
    }
    /// Add a new output vector
    template< typename VEC > void push_vec(VEC& v) {
      outvec.push_back(std::make_shared<asm_vec<VEC>>(&(gmm::linalg_cast(v))));
    }
    /// Add a new output vector (fake const version)
    template< typename VEC > void push_vec(const VEC& v) {
      outvec.push_back(std::make_shared<asm_vec<VEC>>(&(gmm::linalg_cast(v))));
    }
    /// Add a new output matrix (fake const version)
    template< typename MAT > void push_mat(const MAT& m) {
      outmat.push_back(std::make_shared<asm_mat<MAT>>(&(gmm::linalg_cast(m))));
    }
    /// Add a new output matrix
    template< typename MAT > void push_mat(MAT& m) {
      outmat.push_back(std::make_shared<asm_mat<MAT>>(&(gmm::linalg_cast(m))));
    }

    template <typename T> void push_mat_or_vec(T &v) {
      push_mat_or_vec(v, typename gmm::linalg_traits<T>::linalg_type());
    }

    /// used by the getfem_interface.
    void set_vec_factory(base_vec_factory *fact) { vec_fact = fact; }
    void set_mat_factory(base_mat_factory *fact) { mat_fact = fact; }
687  private:
688  ATN_tensor* record(std::unique_ptr<ATN_tensor> &&t) {
689  t->set_name(mark_txt());
690  atn_tensors.push_back(std::move(t)); return atn_tensors.back().get();
691  }
692  ATN* record_out(std::unique_ptr<ATN> t) {
693  t->set_name(mark_txt());
694  outvars.push_back(std::move(t)); return outvars.back().get();
695  }
696  const mesh_fem& do_mf_arg_basic();
697  const mesh_fem& do_mf_arg(std::vector<const mesh_fem*> *multimf = 0);
698  void do_dim_spec(vdim_specif_list& lst);
699  std::string do_comp_red_ops();
700  ATN_tensor* do_comp();
701  ATN_tensor* do_data();
702  std::pair<ATN_tensor*, std::string> do_red_ops(ATN_tensor* t);
703  tnode do_tens();
704  tnode do_prod();
705  tnode do_prod_trans();
706  tnode do_term();
707  tnode do_expr();
708  void do_instr();
709  void exec(size_type cv, dim_type face);
710  void consistency_check();
711  template <typename T> void push_mat_or_vec(T &v, gmm::abstract_vector) {
712  push_vec(v);
713  }
714  template <typename T> void push_mat_or_vec(T &v, gmm::abstract_matrix) {
715  push_mat(v);
716  }
  public:
    /* parse the string 'str' and build the tree of vtensors */
    void parse();

    /** do the assembly on the specified region (boundary or set of convexes) */
    void assembly(const mesh_region &region = mesh_region::all_convexes());
  };
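
  /* A minimal usage sketch (assembly of a mass matrix; `mim`, `mf` and `M`
     are hypothetical objects supplied by the caller):

       getfem::generic_assembly assem;
       assem.set("M(#1,#1)+=sym(comp(Base(#1).Base(#1)))");
       assem.push_im(mim);  // integration method
       assem.push_mf(mf);   // mesh_fem referred to as #1
       assem.push_mat(M);   // output sparse matrix
       assem.assembly();    // assemble over all convexes
  */
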
} /* end of namespace getfem. */


#endif /* GETFEM_ASSEMBLING_TENSORS_H__ */