GetFEM++  5.3
bgeot_tensor.h
Go to the documentation of this file.
1 /* -*- c++ -*- (enables emacs c++ mode) */
2 /*===========================================================================
3 
4  Copyright (C) 2000-2017 Yves Renard
5 
6  This file is a part of GetFEM++
7 
8  GetFEM++ is free software; you can redistribute it and/or modify it
9  under the terms of the GNU Lesser General Public License as published
10  by the Free Software Foundation; either version 3 of the License, or
11  (at your option) any later version along with the GCC Runtime Library
12  Exception either version 3.1 or (at your option) any later version.
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  License and GCC Runtime Library Exception for more details.
17  You should have received a copy of the GNU Lesser General Public License
18  along with this program; if not, write to the Free Software Foundation,
19  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
20 
21  As a special exception, you may use this file as it is a part of a free
22  software library without restriction. Specifically, if other files
23  instantiate templates or use macros or inline functions from this file,
24  or you compile this file and link it with other files to produce an
25  executable, this file does not by itself cause the resulting executable
26  to be covered by the GNU Lesser General Public License. This exception
27  does not however invalidate any other reasons why the executable file
28  might be covered by the GNU Lesser General Public License.
29 
30 ===========================================================================*/
31 
32 /**@file bgeot_tensor.h
33  @author Yves Renard <Yves.Renard@insa-lyon.fr>
34  @date October 09, 2000.
35  @brief tensor class, used in mat_elem computations.
36 */
37 #ifndef BGEOT_TENSOR_H__
38 #define BGEOT_TENSOR_H__
39 
40 #include "bgeot_small_vector.h"
41 #include "getfem/getfem_omp.h"
42 
43 
44 namespace bgeot {
45 
46  /* ********************************************************************* */
47  /* Class tensor<T>. */
48  /* ********************************************************************* */
49 
50  typedef size_t size_type;
51  typedef gmm::uint16_type short_type;
52 
  /** Multi-index used both to hold the dimensions of a tensor and to
      enumerate/address its entries (one index per tensor dimension). */
  class multi_index : public std::vector<size_type> {
  public :

    /** Odometer-style increment, with m[k] as the exclusive upper bound
        of index k.  The first index varies fastest; an index reaching
        its bound wraps to 0 and carries into the next one.  The last
        index is allowed to overflow so that finished() can detect the
        end of the enumeration.  An empty multi-index is resized to 1
        (marker for the scalar case). */
    void incrementation(const multi_index &m) {
      iterator it = begin(), ite = end();
      const_iterator itm = m.begin();
      if (it != ite) {
        ++(*it);
        // Propagate the carry; stop before wrapping the last index.
        while (*it >= *itm && it != (ite-1)) { *it = 0; ++it; ++itm; ++(*it); }
      } else resize(1);
    }

    /// Reset all indices to zero.
    void reset(void) { std::fill(begin(), end(), 0); }

    /** True when the enumeration driven by incrementation(m) is over,
        i.e. the last index has overflowed its bound m[last]. */
    inline bool finished(const multi_index &m) {
      if (m.size() == 0)
        return (size() == 1);  // scalar case: done after one step
      else
        return ((*this)[size()-1] >= m[size()-1]);
    }

    /// n indices, all initialized to zero.
    multi_index(size_t n) : std::vector<size_type>(n)
    { std::fill(begin(), end(), size_type(0)); }
    /// Order-two multi-index (i, j).
    multi_index(size_type i, size_type j)
      : std::vector<size_type>(2)
    { (*this)[0] = i; (*this)[1] = j; }
    /// Order-three multi-index (i, j, k).
    multi_index(size_type i, size_type j, size_type k)
      : std::vector<size_type>(3)
    { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; }
    /// Order-four multi-index (i, j, k, l).
    multi_index(size_type i, size_type j, size_type k, size_type l)
      : std::vector<size_type>(4)
    { (*this)[0] = i; (*this)[1] = j; (*this)[2] = k; (*this)[3] = l; }

    /// Empty multi-index (order zero).
    multi_index(void) {}

    /// Component-wise equality; sizes must match as well.
    bool is_equal(const multi_index &m) const {
      if (this->size() != m.size()) return false;
      for (size_type i = 0; i < m.size(); ++i)
        if (m[i] != (*this)[i]) return false;
      return true;
    }

    /** Product of all components: the number of entries of a tensor
        whose dimensions are given by this multi-index. */
    size_type total_size(void) const {
      size_type s = 1;
      for (size_type k = 0; k < this->size(); ++k) s *= (*this)[k];
      return s;
    }

    /// Approximate memory footprint in bytes (capacity + object itself).
    size_type memsize() const {
      return std::vector<size_type>::capacity()*sizeof(size_type) +
        sizeof(multi_index);
    }
  };
106 
107  inline std::ostream &operator <<(std::ostream &o,
108  const multi_index& mi) { /* a compiler ...*/
109  multi_index::const_iterator it = mi.begin(), ite = mi.end();
110  bool f = true;
111  o << "(";
112  for ( ; it != ite; ++it)
113  { if (!f) o << ", "; o << *it; f = false; }
114  o << ")";
115  return o;
116  }
117 
118  template<class T> class tensor : public std::vector<T> {
119  protected:
120 
121  multi_index sizes_, coeff;
122 
123  public:
124 
125  typedef typename std::vector<T>::size_type size_type;
126  typedef typename std::vector<T>::iterator iterator;
127  typedef typename std::vector<T>::const_iterator const_iterator;
128 
129  template<class CONT> inline const T& operator ()(const CONT &c) const {
130  typename CONT::const_iterator it = c.begin();
131  multi_index::const_iterator q = coeff.begin(), e = coeff.end();
132 #ifndef NDEBUG
133  multi_index::const_iterator qv = sizes_.begin();
134 #endif
135  size_type d = 0;
136  for ( ; q != e; ++q, ++it) {
137  d += (*q) * (*it);
138  GMM_ASSERT2(*it < *qv++, "Index out of range.");
139  }
140  return *(this->begin() + d);
141  }
142 
143  inline T& operator ()(size_type i, size_type j, size_type k,
144  size_type l) {
145  GMM_ASSERT2(order() == 4, "Bad tensor order.");
146  size_type d = coeff[0]*i + coeff[1]*j + coeff[2]*k + coeff[3]*l;
147  GMM_ASSERT2(d < size(), "Index out of range.");
148  return *(this->begin() + d);
149  }
150 
151  inline T& operator ()(size_type i, size_type j, size_type k) {
152  GMM_ASSERT2(order() == 3, "Bad tensor order.");
153  size_type d = coeff[0]*i + coeff[1]*j + coeff[2]*k;
154  GMM_ASSERT2(d < size(), "Index out of range.");
155  return *(this->begin() + d);
156  }
157 
158  inline T& operator ()(size_type i, size_type j) {
159  GMM_ASSERT2(order() == 2, "Bad tensor order");
160  size_type d = coeff[0]*i + coeff[1]*j;
161  GMM_ASSERT2(d < size(), "Index out of range.");
162  return *(this->begin() + d);
163  }
164 
165  inline const T& operator ()(size_type i, size_type j, size_type k,
166  size_type l) const {
167  GMM_ASSERT2(order() == 4, "Bad tensor order.");
168  size_type d = coeff[0]*i + coeff[1]*j + coeff[2]*k + coeff[3]*l;
169  GMM_ASSERT2(d < size(), "Index out of range.");
170  return *(this->begin() + d);
171  }
172 
173  inline const T& operator ()(size_type i, size_type j,
174  size_type k) const {
175  GMM_ASSERT2(order() == 3, "Bad tensor order.");
176  size_type d = coeff[0]*i + coeff[1]*j + coeff[2]*k;
177  GMM_ASSERT2(d < size(), "Index out of range.");
178  return *(this->begin() + d);
179  }
180 
181  inline const T& operator ()(size_type i, size_type j) const {
182  GMM_ASSERT2(order() == 2, "Bad tensor order.");
183  size_type d = coeff[0]*i + coeff[1]*j;
184  GMM_ASSERT2(d < size(), "Index out of range.");
185  return *(this->begin() + d);
186  }
187 
188  template<class CONT> inline T& operator ()(const CONT &c) {
189  typename CONT::const_iterator it = c.begin();
190  multi_index::iterator q = coeff.begin(), e = coeff.end();
191  size_type d = 0;
192  for ( ; q != e; ++q, ++it) d += (*q) * (*it);
193  GMM_ASSERT2(d < size(), "Index out of range.");
194  return *(this->begin() + d);
195  }
196 
197  inline size_type size(void) const { return std::vector<T>::size(); }
198  inline size_type size(size_type i) const { return sizes_[i]; }
199  inline const multi_index &sizes(void) const { return sizes_; }
200  inline size_type order(void) const { return sizes_.size(); }
201 
202  void init(const multi_index &c) {
203  auto it = c.begin();
204  size_type d = 1;
205  sizes_ = c; coeff.resize(c.size());
206  auto p = coeff.begin(), pe = coeff.end();
207  for ( ; p != pe; ++p, ++it) { *p = d; d *= *it; }
208  this->resize(d);
209  }
210 
211  inline void init() { sizes_.resize(0); coeff.resize(0); this->resize(1); }
212 
213  inline void init(size_type i) {
214  sizes_.resize(1); sizes_[0] = i; coeff.resize(1); coeff[0] = 1;
215  this->resize(i);
216  }
217 
218  inline void init(size_type i, size_type j) {
219  sizes_.resize(2); sizes_[0] = i; sizes_[1] = j;
220  coeff.resize(2); coeff[0] = 1; coeff[1] = i;
221  this->resize(i*j);
222  }
223 
224  inline void init(size_type i, size_type j, size_type k) {
225  sizes_.resize(3); sizes_[0] = i; sizes_[1] = j; sizes_[2] = k;
226  coeff.resize(3); coeff[0] = 1; coeff[1] = i; coeff[2] = i*j;
227  this->resize(i*j*k);
228  }
229 
230  inline void init(size_type i, size_type j, size_type k, size_type l) {
231  sizes_.resize(4);
232  sizes_[0] = i; sizes_[1] = j; sizes_[2] = k; sizes_[3] = k;
233  coeff.resize(4);
234  coeff[0] = 1; coeff[1] = i; coeff[2] = i*j; coeff[3] = i*j*k;
235  this->resize(i*j*k*l);
236  }
237 
238  inline void adjust_sizes(const multi_index &mi) { init(mi); }
239  inline void adjust_sizes(void) { init(); }
240  inline void adjust_sizes(size_type i) { init(i); }
241  inline void adjust_sizes(size_type i, size_type j) { init(i, j); }
242  inline void adjust_sizes(size_type i, size_type j, size_type k)
243  { init(i, j, k); }
244  inline void adjust_sizes(size_type i, size_type j, size_type k, size_type l)
245  { init(i, j, k, l); }
246 
247  inline size_type adjust_sizes_changing_last(const tensor &t, size_type P) {
248  const multi_index &mi = t.sizes_; size_type d = mi.size();
249  sizes_.resize(d); coeff.resize(d);
250  if (d) {
251  std::copy(mi.begin(), mi.end(), sizes_.begin());
252  std::copy(t.coeff.begin(), t.coeff.end(), coeff.begin());
253  size_type e = coeff.back();
254  sizes_.back() = P;
255  this->resize(e*P);
256  return e;
257  } else {
258  this->resize(1);
259  return 1;
260  }
261  }
262 
263  inline void remove_unit_dim() {
264  if (sizes_.size()) {
265  size_type i = 0, j = 0;
266  for (; i < sizes_.size(); ++i)
267  if (sizes_[i] != 1) { sizes_[j]=sizes_[i]; coeff[j]=coeff[i]; ++j; }
268  if (!j) ++j;
269  sizes_.resize(j);
270  coeff.resize(j);
271  }
272  }
273 
274  /** reduction of tensor t with respect to index ni with matrix m:
275  * t(...,j,...) <-- t(...,i,..) m(i, j)
276  */
277  void mat_reduction(const tensor &t, const gmm::dense_matrix<T> &m, int ni);
278  void mat_transp_reduction(const tensor &t, const gmm::dense_matrix<T> &m,
279  int ni);
280  /** mm(i,j) = t(i,j,k,l) * m(k,l); For order four tensor. */
281  void mat_mult(const gmm::dense_matrix<T> &m, gmm::dense_matrix<T> &mm);
282 
283  /** tt = t(...) * t2(...) */
284  void product(const tensor &t2, tensor &tt);
285  /** tt = t(...,k) * t2(k,...) */
286  void dot_product(const tensor &t2, tensor &tt);
287  void dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
288  /** tt = t(...,k,l) * t2(k,l,...) */
289  void double_dot_product(const tensor &t2, tensor &tt);
290  void double_dot_product(const gmm::dense_matrix<T> &m, tensor &tt);
291 
292  size_type memsize() const {
293  return sizeof(T) * this->size()
294  + sizeof(*this) + sizes_.memsize() + coeff.memsize();
295  }
296 
297  std::vector<T> &as_vector(void) { return *this; }
298  const std::vector<T> &as_vector(void) const { return *this; }
299 
300 
301  tensor<T>& operator +=(const tensor<T>& w)
302  { gmm::add(w.as_vector(), this->as_vector()); return *this; }
303 
304  tensor<T>& operator -=(const tensor<T>& w) {
305  gmm::add(gmm::scaled(w.as_vector(), T(-1)), this->as_vector());
306  return *this;
307  }
308 
309  tensor<T>& operator *=(const scalar_type w)
310  { gmm::scale(this->as_vector(), w); return *this; }
311 
312  tensor<T>& operator /=(const scalar_type w)
313  { gmm::scale(this->as_vector(), scalar_type(1)/w); return *this; }
314 
315  tensor &operator =(const tensor &t) {
316  if (this->size() != t.size()) this->resize(t.size());
317  std::copy(t.begin(), t.end(), this->begin());
318  if (sizes_.size() != t.sizes_.size()) sizes_.resize(t.sizes_.size());
319  std::copy(t.sizes_.begin(), t.sizes_.end(), sizes_.begin());
320  if (coeff.size() != t.coeff.size()) coeff.resize(t.coeff.size());
321  std::copy(t.coeff.begin(), t.coeff.end(), coeff.begin());
322  return *this;
323  }
324 
325  tensor(const multi_index &c) { init(c); }
326  tensor(size_type i, size_type j, size_type k, size_type l)
327  { init(multi_index(i, j, k, l)); }
328  tensor(void) {}
329  };
330 
  /** Reduction (contraction) of tensor t along its index ni with the
      transpose of matrix m: (*this)(...,j,...) = t(...,i,...) * m(j,i).
      t and *this must be distinct objects. */
  template<class T> void tensor<T>::mat_transp_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* reduction of tensor t along index ni with the transpose of m. */

    // Thread-local scratch buffers, reused across calls to avoid
    // per-call allocation.
    DEFINE_STATIC_THREAD_LOCAL(std::vector<T>, tmp);
    DEFINE_STATIC_THREAD_LOCAL(multi_index, mi);

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.nrows();  // dimt: contracted dimension

    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.ncols(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;                   // result has m.nrows() along index ni
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);

    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // co/cot: stride of index ni in *this / t.  dd/ddt: jump skipping the
    // rest of a fiber along ni (minus the one step the loop itself does).
    size_type dd = coeff[ni]*( sizes()[ni]-1)-1, co = coeff[ni];
    size_type ddt = t.coeff[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff[ni];
    std::fill(mi.begin(), mi.end(), 0);
    // Enumerate the result's multi-indices; a whole fiber along ni is
    // written when mi[ni] == 0, so positions with mi[ni] != 0 are skipped.
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // Fiber already written: push mi past it and jump the iterators.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      } else {
        // Gather the fiber of t along index ni into tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl;}

        // Result fiber: dot products of tmp with the rows of m.  m is
        // column-major, so row k is walked with stride dim = m.nrows().
        iterator pff = pf;
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin(); pl = m.begin() + k;
          *pff += (*pl) * (*pt); ++pt;
          for (size_type l = 1; l < dimt; ++l, ++pt) {
            pl += dim;
            *pff += (*pl) * (*pt);
          }
        }
      }
    }
  }
378 
379  template<class T> void tensor<T>::mat_mult(const gmm::dense_matrix<T> &m,
380  gmm::dense_matrix<T> &mm) {
381  GMM_ASSERT2(order() == 4,
382  "This operation is for order four tensors only.");
383  GMM_ASSERT2(sizes_[2] == gmm::mat_nrows(m) &&
384  sizes_[3] == gmm::mat_ncols(m), "Dimensions mismatch.");
385  mm.base_resize(sizes_[0], sizes_[1]);
386  gmm::clear(mm);
387 
388  const_iterator pt = this->begin();
389  const_iterator pm = m.begin();
390  for (size_type l = 0; l < sizes_[3]; ++l)
391  for (size_type k = 0; k < sizes_[2]; ++k) {
392  iterator pmm = mm.begin();
393  for (size_type j = 0; j < sizes_[1]; ++j)
394  for (size_type i = 0; i < sizes_[0]; ++i)
395  *pmm++ += *pt++ * (*pm);
396  ++pm;
397  }
398  }
399 
  /** Reduction (contraction) of tensor t along its index ni with
      matrix m: (*this)(...,j,...) = t(...,i,...) * m(i,j).
      t and *this must be distinct objects. */
  template<class T> void tensor<T>::mat_reduction
  (const tensor &t, const gmm::dense_matrix<T> &m, int ni) {
    /* reduction of tensor t along its index ni with matrix m. */
    // Thread-local scratch buffers, reused across calls to avoid
    // per-call allocation.
    DEFINE_STATIC_THREAD_LOCAL(std::vector<T>, tmp);
    DEFINE_STATIC_THREAD_LOCAL(multi_index, mi);

    mi = t.sizes();
    size_type dimt = mi[ni], dim = m.ncols();  // dimt: contracted dimension
    GMM_ASSERT2(dimt, "Inconsistent dimension.");
    GMM_ASSERT2(dimt == m.nrows(), "Dimensions mismatch.");
    GMM_ASSERT2(&t != this, "Does not work when t and *this are the same.");

    mi[ni] = dim;                   // result has m.ncols() along index ni
    if (tmp.size() < dimt) tmp.resize(dimt);
    adjust_sizes(mi);
    const_iterator pft = t.begin();
    iterator pf = this->begin();
    // co/cot: stride of index ni in *this / t.  dd/ddt: jump skipping the
    // rest of a fiber along ni (minus the one step the loop itself does).
    size_type dd = coeff[ni]*( sizes()[ni]-1)-1, co = coeff[ni];
    size_type ddt = t.coeff[ni]*(t.sizes()[ni]-1)-1, cot = t.coeff[ni];
    std::fill(mi.begin(), mi.end(), 0);
    // Enumerate the result's multi-indices; a whole fiber along ni is
    // written when mi[ni] == 0, so positions with mi[ni] != 0 are skipped.
    for (;!mi.finished(sizes()); mi.incrementation(sizes()), ++pf, ++pft) {
      if (mi[ni] != 0) {
        // Fiber already written: push mi past it and jump the iterators.
        for (size_type k = 0; k <= size_type(ni); ++k)
          mi[k] = size_type(sizes()[k] - 1);
        pf += dd; pft += ddt;
      }
      else {
        // Gather the fiber of t along index ni into tmp.
        const_iterator pl = pft; iterator pt = tmp.begin();
        *pt++ = *pl;
        for(size_type k = 1; k < dimt; ++k, ++pt) { pl += cot; *pt = *pl; }

        // Result fiber: column k of m dotted with tmp; m is walked
        // sequentially (column-major, columns contiguous).
        iterator pff = pf; pl = m.begin();
        for (size_type k = 0; k < dim; ++k) {
          if (k) pff += co;
          *pff = T(0); pt = tmp.begin();
          for (size_type l = 0; l < dimt; ++l, ++pt, ++pl)
            *pff += (*pl) * (*pt);
        }
      }
    }
  }
441 
442 
443  template<class T> void tensor<T>::product(const tensor<T> &t2,
444  tensor<T> &tt) {
445  size_type res_order = order() + t2.order();
446  multi_index res_size(res_order);
447  for (size_type i = 0 ; i < this->order(); ++i) res_size[i] = this->size(i);
448  for (size_type i = 0 ; i < t2.order(); ++i) res_size[order() + i] = t2.size(i);
449  tt.adjust_sizes(res_size);
450  gmm::clear(tt.as_vector());
451 
452  size_type size1 = this->size();
453  size_type size2 = t2.size();
454  const_iterator pt2 = t2.begin();
455  iterator ptt = tt.begin();
456  for (size_type j = 0; j < size2; ++j, ++pt2) {
457  const_iterator pt1 = this->begin();
458  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
459  *ptt += *pt1 * (*pt2);
460  }
461  }
462 
463 
464  template<class T> void tensor<T>::dot_product(const tensor<T> &t2,
465  tensor<T> &tt) {
466  GMM_ASSERT2(size(order()-1) == t2.size(0),
467  "Dimensions mismatch between last dimension of first tensor "
468  "and first dimension of second tensor.");
469  size_type res_order = order() + t2.order() - 2;
470  multi_index res_size(res_order);
471  for (size_type i = 0 ; i < this->order() - 1; ++i) res_size[i] = this->size(i);
472  for (size_type i = 0 ; i < t2.order() - 1; ++i) res_size[order() - 1 + i] = t2.size(i);
473  tt.adjust_sizes(res_size);
474  gmm::clear(tt.as_vector());
475 
476  size_type size0 = t2.size(0);
477  size_type size1 = this->size()/size0;
478  size_type size2 = t2.size()/size0;
479  const_iterator pt2 = t2.begin();
480  iterator ptt = tt.begin();
481  for (size_type j = 0; j < size2; ++j) {
482  const_iterator pt1 = this->begin();
483  iterator ptt0 = ptt;
484  for (size_type q = 0; q < size0; ++q, ++pt2) {
485  ptt = ptt0;
486  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
487  *ptt += *pt1 * (*pt2);
488  }
489  }
490  }
491 
492  template<class T> void tensor<T>::dot_product(const gmm::dense_matrix<T> &m,
493  tensor<T> &tt) {
494  GMM_ASSERT2(size(order()-1) == gmm::mat_nrows(m),
495  "Dimensions mismatch between last dimensions of tensor "
496  "and rows of the matrix.");
497  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
498  gmm::copy(m.as_vector(), t2.as_vector());
499  dot_product(t2, tt);
500  }
501 
502 
503  template<class T> void tensor<T>::double_dot_product(const tensor<T> &t2,
504  tensor<T> &tt) {
505  GMM_ASSERT2(order() >= 2 && t2.order() >= 2,
506  "Tensors of wrong size. Tensors of order two or higher are required.");
507  GMM_ASSERT2(size(order()-2) == t2.size(0) && size(order()-1) == t2.size(1),
508  "Dimensions mismatch between last two dimensions of first tensor "
509  "and first two dimensions of second tensor.");
510  size_type res_order = order() + t2.order() - 4;
511  multi_index res_size(res_order);
512  for (size_type i = 0 ; i < this->order() - 2; ++i) res_size[i] = this->size(i);
513  for (size_type i = 0 ; i < t2.order() - 2; ++i) res_size[order() - 2 + i] = t2.size(i);
514  tt.adjust_sizes(res_size);
515  gmm::clear(tt.as_vector());
516 
517  size_type size0 = t2.size(0)*t2.size(1);
518  size_type size1 = this->size()/size0;
519  size_type size2 = t2.size()/size0;
520  const_iterator pt2 = t2.begin();
521  iterator ptt = tt.begin();
522  for (size_type j = 0; j < size2; ++j) {
523  const_iterator pt1 = this->begin();
524  iterator ptt0 = ptt;
525  for (size_type q = 0; q < size0; ++q, ++pt2) {
526  ptt = ptt0;
527  for (size_type i = 0; i < size1; ++i, ++pt1, ++ptt)
528  *ptt += *pt1 * (*pt2);
529  }
530  }
531  }
532 
533  template<class T> void tensor<T>::double_dot_product(const gmm::dense_matrix<T> &m,
534  tensor<T> &tt) {
535  GMM_ASSERT2(order() >= 2,
536  "Tensor of wrong size. Tensor of order two or higher is required.");
537  GMM_ASSERT2(size(order()-2) == gmm::mat_nrows(m) &&
538  size(order()-1) == gmm::mat_ncols(m),
539  "Dimensions mismatch between last two dimensions of tensor "
540  "and dimensions of the matrix.");
541  tensor<T> t2(multi_index(gmm::mat_nrows(m),gmm::mat_ncols(m)));
542  gmm::copy(m.as_vector(), t2.as_vector());
543  double_dot_product(t2, tt);
544  }
545 
546 
547  template<class T> std::ostream &operator <<
548  (std::ostream &o, const tensor<T>& t) {
549  o << "sizes " << t.sizes() << " " << vref(t.as_vector());
550  return o;
551  }
552 
553  typedef tensor<scalar_type> base_tensor;
554  typedef tensor<complex_type> base_complex_tensor;
555 
556 
557 } /* end of namespace bgeot. */
558 
559 
#endif /* BGEOT_TENSOR_H__ */
Small (dim < 8) vectors.
std::ostream & operator<<(std::ostream &o, const convex_structure &cv)
Print the details of the convex structure cvs to the output stream o.
Tools for multithreaded, OpenMP and Boost based parallelization.
size_t size_type
used as the common size type in the library
Definition: bgeot_poly.h:49
void resize(V &v, size_type n)
resize a vector.
Definition: gmm_blas.h:209
void clear(L &l)
clear (fill with zeros) a vector or matrix.
Definition: gmm_blas.h:59
gmm::uint16_type short_type
used as the common short type integer in the library
Definition: bgeot_config.h:79
Basic Geometric Tools.