adc-connect / adcc — build 21242181274

22 Jan 2026 08:56AM UTC — coverage: 73.944% (-0.3%) from 74.293%

Pull Request #201 (github / web-flow): Add MP2 Triples and instantiate 5 and 6D Tensors
Merge 9c01b5574 into 9ff4abd52

1218 of 1928 branches covered (63.17%); branch coverage is included in the aggregate %.
126 of 226 new or added lines in 6 files covered (55.75%).
3 existing lines in 2 files are now uncovered.
7605 of 10004 relevant lines covered (76.02%) at 171404.51 hits per line.

Source file: /libadcc_src/TensorImpl.cc — 74.12% covered

//
// Copyright (C) 2020 by the adcc authors
//
// This file is part of adcc.
//
// adcc is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// adcc is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with adcc. If not, see <http://www.gnu.org/licenses/>.
//

#include "TensorImpl.hh"
#include "TensorImpl/as_bispace.hh"
#include "TensorImpl/as_lt_symmetry.hh"
#include "TensorImpl/get_block_starts.hh"
#include "shape_to_string.hh"

// Change visibility of libtensor singletons to public
#pragma GCC visibility push(default)
#include <libtensor/block_tensor/btod_add.h>
#include <libtensor/block_tensor/btod_copy.h>
#include <libtensor/block_tensor/btod_dotprod.h>
#include <libtensor/block_tensor/btod_export.h>
#include <libtensor/block_tensor/btod_random.h>
#include <libtensor/block_tensor/btod_select.h>
#include <libtensor/block_tensor/btod_set.h>
#include <libtensor/block_tensor/btod_set_diag.h>
#include <libtensor/block_tensor/btod_set_elem.h>
#include <libtensor/symmetry/print_symmetry.h>
#pragma GCC visibility pop

namespace libadcc {
namespace lt = libtensor;

#define DIMENSIONALITY_CHECK(OTHER)                                                    \
  {                                                                                    \
    if (ndim() != OTHER->ndim()) {                                                     \
      throw dimension_mismatch(                                                        \
            "Dimensionality of this tensor (" + std::to_string(ndim()) +               \
            ") does not agree with the dimensionality of the other tensor"             \
            " passed, which has dimensionality " +                                     \
            std::to_string(OTHER->ndim()) + ".");                                      \
    }                                                                                  \
    if (shape() != OTHER->shape()) {                                                   \
      throw dimension_mismatch("Shape of this tensor (" + shape_to_string(shape()) +   \
                               ") does not agree with the shape of the other tensor" + \
                               " passed, which has shape " +                           \
                               shape_to_string(OTHER->shape()) + ".");                 \
    }                                                                                  \
    if (axes() != OTHER->axes()) {                                                     \
      throw dimension_mismatch("Axes of this tensor (" + axes_to_string(axes()) +      \
                               ") do not agree with the axes of the other tensor "     \
                               "passed, which has axis labels " +                      \
                               axes_to_string(OTHER->axes()) + ".");                   \
    }                                                                                  \
  }

namespace {
std::string axes_to_string(const std::vector<AxisInfo>& axes) {
  std::string res = "";
  for (auto& ax : axes) res.append(ax.label);
  return res;
}

/** Build an identity permutation of length n */
std::vector<size_t> identity_permutation(size_t n) {
  std::vector<size_t> permutation;
  for (size_t i = 0; i < n; ++i) {
    permutation.push_back(i);
  }
  return permutation;
}

template <size_t N>
lt::expr::label<N> strip_safe(const std::vector<std::shared_ptr<const lt::letter>>& in) {
  std::vector<const lt::letter*> label_unsafe;
  for (size_t i = 0; i < in.size(); ++i) {
    label_unsafe.push_back(in[i].get());
  }
  return lt::expr::label<N>(label_unsafe);
}

template <size_t N>
std::vector<size_t> extract_expr_permutation(
      const lt::expr::expr_rhs<N, scalar_type>& expr,
      const std::vector<std::shared_ptr<const lt::letter>>& label) {
  std::vector<size_t> permutation;
  lt::permutation<N> perm = expr.get_label().permutation_of(strip_safe<N>(label));
  perm.invert();
  for (size_t i = 0; i < N; ++i) {
    permutation.push_back(perm[i]);
  }
  return permutation;
}

/** Merge two keepalive lists */
std::vector<std::shared_ptr<void>> merge(std::vector<std::shared_ptr<void>> lhs,
                                         const std::vector<std::shared_ptr<void>>& rhs) {
  for (auto& ptr : rhs) lhs.push_back(ptr);
  return lhs;
}

std::vector<std::shared_ptr<const lt::letter>> make_label(size_t n) {
  std::vector<std::shared_ptr<const lt::letter>> ret;
  for (size_t i = 0; i < n; ++i) {
    ret.push_back(std::make_shared<lt::letter>());
  }
  return ret;
}

template <typename T, size_t>
using repeat_t = T;

template <typename T, size_t... I>
std::tuple<repeat_t<T, I>...> tuple_array_impl(std::index_sequence<I...>);

/** Array like tuple that contains N fields of the given type */
template <typename T, size_t N>
using tuple_array_t = decltype(tuple_array_impl<T>(std::make_index_sequence<N>{}));

template <size_t PERM_SIZE, size_t N_PERMS, size_t... I>
tuple_array_t<lt::expr::label<N_PERMS>, PERM_SIZE> make_label_tuple(
      const std::array<std::vector<const lt::letter*>, PERM_SIZE>& sets,
      std::index_sequence<I...>) {
  return {(lt::expr::label<N_PERMS>(sets[I]))...};
}
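
// Illustration: tuple_array_t expands a single type repeated N times into a
// std::tuple, e.g.
//   static_assert(std::is_same<tuple_array_t<int, 3>,
//                              std::tuple<int, int, int>>::value, "");
// which is exactly the shape of tuple that make_label_tuple returns.
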
std::string perm_to_string(const std::vector<size_t>& permutation) {
  std::ostringstream oss;
  oss << "[";
  for (size_t i{0}; i < permutation.size(); i++) {
    if (i != 0) oss << ", ";
    oss << permutation[i];
  }
  oss << "]";
  return oss.str();
}

// Using a vector as return type would not significantly simplify the
// code in symmetrise and antisymmetrise below.
// Using an array would in principle be possible and would avoid the
// meta programming above to define tuple_array_t, but tuples come with
// additional compile-time guarantees (e.g. std::get prevents out-of-bounds
// access at compile time), which arrays do not offer.
template <size_t PERM_SIZE, size_t N_PERMS, size_t N>
tuple_array_t<lt::expr::label<N_PERMS>, PERM_SIZE> parse_permutation(
      const std::vector<AxisInfo>& axes, const lt::expr::label<N>& label,
      const std::vector<std::vector<size_t>>& permutations) {
  static_assert(PERM_SIZE > 1,
                "Permutations have to occur between at least two sets of indices.");
  static_assert(N_PERMS > 0, "There has to be at least 1 permutation");

  if (permutations.size() != N_PERMS)
    throw invalid_argument("Invalid number of permutations provided (" +
                           std::to_string(permutations.size()) + "). Expected " +
                           std::to_string(N_PERMS) + ".");

  std::array<std::vector<const lt::letter*>, PERM_SIZE> sets;
  std::vector<size_t> processed_indices;
  for (const auto& perm : permutations) {
    if (perm.size() != PERM_SIZE)
      throw invalid_argument(
            "Bad number of indices in permutation: " + perm_to_string(perm) +
            ". Expected " + std::to_string(PERM_SIZE) + " indices.");
    // Sanity check the permutation (PERM_SIZE is small -> just brute force)
    for (size_t i{0}; i < PERM_SIZE; i++) {
      for (size_t j{i + 1}; j < PERM_SIZE; j++) {
        if (perm[i] == perm[j])
          throw invalid_argument("Duplicate entry in permutation " +
                                 perm_to_string(perm) + " detected.");
        if (axes[perm[i]] != axes[perm[j]]) {
          std::ostringstream msg;
          msg << "(Anti)-Symmetrisation can only be performed over equivalent axes (not ";
          for (size_t k{0}; k < PERM_SIZE; k++) {
            if (k != 0) msg << ", ";
            msg << axes[perm[k]].label;
          }
          msg << ")";
          throw invalid_argument(msg.str());
        }
      }
    }
    // go through the permutation and actually parse it
    for (size_t i{0}; i < PERM_SIZE; i++) {
      // check if the index was already included in another permutation
      auto find_i =
            std::find(processed_indices.begin(), processed_indices.end(), perm[i]);
      if (find_i != processed_indices.end()) {
        std::ostringstream msg;
        msg << "Permutations in a permutation list have to be disjoint. Got\n";
        for (const auto& perm : permutations) {
          msg << perm_to_string(perm) << "\n";
        }
        throw invalid_argument(msg.str());
      }
      if (perm[i] >= N)
        throw invalid_argument("Index in permutation " + perm_to_string(perm) +
                               " cannot be larger than dimensionality (" +
                               std::to_string(N) + ").");
      sets[i].push_back(&label.letter_at(perm[i]));
      processed_indices.push_back(perm[i]);
    }
  }
  // convert the positions into libtensor labels
  return make_label_tuple<PERM_SIZE, N_PERMS>(sets,
                                              std::make_index_sequence<PERM_SIZE>{});
}
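
// Illustration: for a rank-4 tensor and permutations == {{0, 1}, {2, 3}},
// parse_permutation<2, 2> gathers the letters at positions {0, 2} into the
// first returned label and those at positions {1, 3} into the second, i.e.
// the two index groups which lt::expr::symm exchanges simultaneously
// (0 <-> 1 together with 2 <-> 3) in symmetrise below.
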
template <size_t N, typename T>
std::pair<lt::index<N>, lt::index<N>> assert_convert_tensor_index(
      lt::btensor<N, T>& tensor, const std::vector<size_t>& idx) {
  if (idx.size() != N) {
    throw dimension_mismatch("Tensor is of dimension " + std::to_string(N) +
                             ", but passed index has a dimension of " +
                             std::to_string(idx.size()) + ".");
  }
  const lt::dimensions<N>& dims = tensor.get_bis().get_dims();
  for (size_t i = 0; i < N; ++i) {
    if (idx[i] >= dims[i]) {
      throw invalid_argument("Passed index " + shape_to_string(idx) +
                             " overshoots Tensor at dimension " + std::to_string(i) +
                             " (with extent: " + std::to_string(dims[i]) + ")");
    }
  }

  lt::index<N> block_idx;
  for (size_t idim = 0; idim < N; ++idim) {
    // Find splits
    const size_t dim_type     = tensor.get_bis().get_type(idim);
    const lt::split_points sp = tensor.get_bis().get_splits(dim_type);
    block_idx[idim]           = 0;
    for (size_t isp = 0; isp < sp.get_num_points(); ++isp) {
      if (sp[isp] > idx[idim]) break;
      block_idx[idim] = isp + 1;
    }
  }

  lt::index<N> inblock_idx;
  lt::index<N> bstart    = tensor.get_bis().get_block_start(block_idx);
  lt::dimensions<N> bdim = tensor.get_bis().get_block_dims(block_idx);
  for (size_t idim = 0; idim < N; ++idim) {
    inblock_idx[idim] = idx[idim] - bstart[idim];
    if (inblock_idx[idim] >= bdim[idim]) {
      throw runtime_error(
            "Internal error: Determined in-block index overshoots block dimensionality");
    }
  }

  return std::make_pair(block_idx, inblock_idx);
}
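
// Illustration: for an axis of extent 10 split at {3, 7} the blocks cover
// [0, 3), [3, 7) and [7, 10), so the global index 5 is converted to block
// index 1 with in-block index 5 - 3 == 2.
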
template <typename Comparator, size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> execute_select_n(
      lt::btensor<N, scalar_type>& tensor, size_t n, bool unique_by_symmetry) {
  using btod_select_t = lt::btod_select<N, Comparator>;
  std::list<lt::block_tensor_element<N, scalar_type>> il;
  if (!unique_by_symmetry) {
    lt::symmetry<N, scalar_type> nosym(tensor.get_bis());
    btod_select_t(tensor, nosym).perform(il, n);
  } else {
    btod_select_t(tensor).perform(il, n);
  }

  std::vector<std::pair<std::vector<size_t>, scalar_type>> ret;
  for (auto it = il.begin(); it != il.end(); ++it) {
    std::vector<size_t> fidx(N);
    const lt::index<N> bstart = tensor.get_bis().get_block_start(it->get_block_index());
    for (size_t i = 0; i < N; ++i) {
      fidx[i] = bstart[i] + it->get_in_block_index()[i];
    }
    ret.emplace_back(fidx, it->get_value());
  }
  return ret;
}
}  // namespace
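
// Note on execute_select_n: the Comparator policy decides which n elements
// btod_select picks (e.g. the largest in magnitude for an absmax-style
// comparator); the loop above then converts libtensor's (block index,
// in-block index) pairs into plain global index vectors for the interface.
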
template <size_t N>
void TensorImpl<N>::check_state() const {
  if (m_expr_ptr == nullptr && m_libtensor_ptr == nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be nullptr.");
  }
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }

  if (N != ndim()) {
    throw runtime_error("Internal error: libtensor dimension (== " + std::to_string(N) +
                        ") and tensor dimension (==" + std::to_string(ndim()) +
                        ") differ.");
  }

  if (m_libtensor_ptr) {
    std::vector<size_t> btshape(N);
    const lt::dimensions<N>& dims = m_libtensor_ptr->get_bis().get_dims();
    for (size_t i = 0; i < N; ++i) {
      btshape[i] = dims.get_dim(i);
    }
    if (shape() != btshape) {
      throw runtime_error(
            "Internal error: libtensor shape (== " + shape_to_string(btshape) +
            ") and tensor shape (==" + shape_to_string(shape()) + ") differ.");
    }

    const std::vector<std::vector<size_t>> tensorblocks =
          get_block_starts(*m_libtensor_ptr);
    for (size_t i = 0; i < N; ++i) {
      if (axes()[i].block_starts != tensorblocks[i]) {
        throw runtime_error("Internal error: Block starts of btensor " +
                            shape_to_string(tensorblocks[i]) + " at dimension " +
                            std::to_string(i) +
                            " do not agree with the cached block starts " +
                            shape_to_string(axes()[i].block_starts) + ".");
      }
    }
  }

  if (m_expr_ptr) {
    if (m_expr_ptr->permutation.size() != N) {
      throw runtime_error(
            "Internal error: Expression dimension (== " + std::to_string(N) +
            ") and tensor dimension (==" + std::to_string(ndim()) + ") differ.");
    }
  }
}

template <size_t N>
void TensorImpl<N>::reset_state(
      std::shared_ptr<lt::btensor<N, scalar_type>> libtensor_ptr) const {
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }
  if (libtensor_ptr == nullptr) {
    throw runtime_error(
          "Internal error: libtensor_ptr to be used for reset_state is a nullptr.");
  }
  m_libtensor_ptr = libtensor_ptr;
  m_expr_ptr.reset();
  check_state();
}

template <size_t N>
void TensorImpl<N>::reset_state(std::shared_ptr<ExpressionTree> expr_ptr) const {
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }
  if (expr_ptr == nullptr) {
    throw runtime_error(
          "Internal error: expr_ptr to be used for reset_state is a nullptr.");
  }
  m_expr_ptr = expr_ptr;
  m_libtensor_ptr.reset();
  check_state();
}

template <size_t N>
TensorImpl<N>::TensorImpl(std::shared_ptr<const AdcMemory> adcmem_ptr,
                          std::vector<AxisInfo> axes,
                          std::shared_ptr<lt::btensor<N, scalar_type>> libtensor_ptr,
                          std::shared_ptr<ExpressionTree> expr_ptr)
      : Tensor(adcmem_ptr, axes), m_libtensor_ptr(nullptr), m_expr_ptr(nullptr) {
  if (axes.size() != N) {
    throw invalid_argument("axes length (== " + std::to_string(axes.size()) +
                           ") does not agree with tensor dimensionality " +
                           std::to_string(N));
  }

  if (expr_ptr != nullptr && libtensor_ptr != nullptr) {
    throw invalid_argument("libtensor_ptr and expr_ptr cannot both be set pointers.");
  }
  if (expr_ptr == nullptr && libtensor_ptr == nullptr) {
    // Allocate an empty tensor.
    libtensor_ptr = std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(axes));
  }

  if (expr_ptr != nullptr) reset_state(expr_ptr);
  if (libtensor_ptr != nullptr) reset_state(libtensor_ptr);
}

template <size_t N>
void TensorImpl<N>::evaluate() const {
  check_state();
  if (!needs_evaluation()) return;

  // Allocate output tensor and evaluate
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(m_axes));
  m_expr_ptr->evaluate_to(*newtensor_ptr, /* add = */ false);

  // Check and test new tensor, cleanup expression
  reset_state(newtensor_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::empty_like() const {
  check_state();

  // TODO This evaluates the expression, which is probably an unexpected effect.

  // Create new btensor using the old bispace
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(libtensor_ptr()->get_bis());

  // Copy the symmetry over
  lt::block_tensor_ctrl<N, scalar_type> ctrl_to(*newtensor_ptr);
  lt::block_tensor_ctrl<N, scalar_type> ctrl_from(*libtensor_ptr());
  lt::so_copy<N, scalar_type>(ctrl_from.req_const_symmetry())
        .perform(ctrl_to.req_symmetry());

  // Enwrap inside TensorImpl and return
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, std::move(newtensor_ptr));
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::nosym_like() const {
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes);
}

template <size_t N>
void TensorImpl<N>::set_mask(std::string mask, scalar_type value) {
  if (N != mask.size()) {
    throw invalid_argument("The number of characters in the index mask (== " + mask +
                           ") does not agree with the Tensor dimensionality (== " +
                           std::to_string(N) + ")");
  }

  // Non-obviously the indices for the mask have to start with 1,
  // 0 gives utterly weird values
  size_t next_idx = 1;
  std::map<char, size_t> char_to_idx;
  lt::sequence<N, size_t> seq(0);
  for (size_t i = 0; i < mask.size(); ++i) {
    const char c  = mask[i];
    const auto it = char_to_idx.find(c);
    if (it == char_to_idx.end()) {
      char_to_idx[c] = next_idx;
      seq[i]         = next_idx;
      next_idx += 1;
    } else {
      seq[i] = it->second;
    }
  }

  if (char_to_idx.size() == N) {
    // Every character in the mask is different ... just use bto_set
    // TODO Optimise: This evaluates, but here there is no point ... Just allocate
    lt::btod_set<N>(value).perform(*libtensor_ptr());
  } else {
    lt::btod_set_diag<N>(seq, value).perform(*libtensor_ptr());
  }
}
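
// Illustration: on a rank-3 tensor set_mask("iij", 1.0) builds
// seq == {1, 1, 2} and therefore sets t[i, i, j] = 1.0 for all i, j via
// btod_set_diag, while set_mask("ijk", 1.0), where all characters differ,
// sets every element via btod_set.
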
template <size_t N>
void TensorImpl<N>::set_random() {
  // TODO optimise: No point in evaluating ... just allocate
  lt::btod_random<N>().perform(*libtensor_ptr());
}

namespace {

template <size_t R, size_t D, size_t N>
std::shared_ptr<Tensor> execute_diagonal(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_diag,
      const std::vector<std::shared_ptr<const lt::letter>>& label_expr,
      std::shared_ptr<ExpressionTree> expr, std::vector<AxisInfo> axes) {
  auto lthis    = expr->attach_letters<N>(label_expr);
  auto res      = lt::expr::diag(*label_diag[0], strip_safe<D>(label_diag), lthis);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result), expr->keepalives);
  return std::make_shared<TensorImpl<R>>(adcmem_ptr, axes, expr_ptr);
}

}  // namespace

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::diagonal(std::vector<size_t> axes) {
  if (axes.size() <= 1) {
    throw invalid_argument("Axes needs to have at least two entries.");
  }
  auto label                                = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();

  std::vector<std::shared_ptr<const lt::letter>> diag;
  std::unique_ptr<AxisInfo> diagaxis_ptr;
  std::vector<size_t> used_indices;
  for (size_t& i : axes) {
    auto it = std::find(used_indices.begin(), used_indices.end(), i);
    if (it != used_indices.end()) {
      throw invalid_argument("Axes may not have repeated indices.");
    }
    if (i >= N) {
      throw invalid_argument("Axis index (== " + std::to_string(i) +
                             ") goes beyond dimensionality of tensor (" +
                             std::to_string(N) + ")");
    }
    if (diagaxis_ptr == nullptr) {
      diagaxis_ptr.reset(new AxisInfo(m_axes[i]));
    } else {
      if (*diagaxis_ptr != m_axes[i]) {
        throw invalid_argument("Cannot form diagonal over differing axes. " +
                               diagaxis_ptr->label + " versus " + m_axes[i].label + ".");
      }
    }

    diag.push_back(label[i]);
    used_indices.push_back(i);
  }

  // Collect letters, which are to be left unchanged.
  std::vector<AxisInfo> axes_result;
  std::vector<std::shared_ptr<const lt::letter>> label_result;
  for (size_t i = 0; i < N; ++i) {
    auto it = std::find(used_indices.begin(), used_indices.end(), i);
    if (it == used_indices.end()) {
      label_result.push_back(label[i]);
      axes_result.push_back(m_axes[i]);
    }
  }
  label_result.push_back(diag[0]);
  axes_result.push_back(*diagaxis_ptr);

#define IF_MATCHES_EXECUTE(NTHIS, DIAG)                                            \
  if (N == NTHIS && DIAG == diag.size()) {                                         \
    constexpr size_t DIMOUT = NTHIS - DIAG + 1;                                    \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                    \
                  "Internal error with DIMOUT computation");                       \
    return execute_diagonal<DIMOUT, DIAG, NTHIS>(m_adcmem_ptr, label_result, diag, \
                                                 label, expr_this, axes_result);   \
  }

  IF_MATCHES_EXECUTE(2, 2)  //
  IF_MATCHES_EXECUTE(3, 2)  //
  IF_MATCHES_EXECUTE(3, 3)  //
  IF_MATCHES_EXECUTE(4, 2)  //
  IF_MATCHES_EXECUTE(4, 3)  //
  IF_MATCHES_EXECUTE(4, 4)  //
  IF_MATCHES_EXECUTE(5, 2)  //
  IF_MATCHES_EXECUTE(5, 3)  //
  IF_MATCHES_EXECUTE(5, 4)  //
  IF_MATCHES_EXECUTE(5, 5)  //
  IF_MATCHES_EXECUTE(6, 2)  //
  IF_MATCHES_EXECUTE(6, 3)  //
  IF_MATCHES_EXECUTE(6, 4)  //
  IF_MATCHES_EXECUTE(6, 5)  //
  IF_MATCHES_EXECUTE(6, 6)  //

  throw not_implemented_error("diagonal not implemented for dimensionality " +
                              std::to_string(N) + " and " + std::to_string(diag.size()) +
                              " axes indices.");

#undef IF_MATCHES_EXECUTE
}
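
// Illustration: for a rank-2 tensor t, diagonal({0, 1}) yields the rank-1
// tensor d with d[i] = t[i, i]. In general the passed axes collapse into a
// single axis, appended as the last axis of the result, so the result has
// rank N - axes.size() + 1.
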
template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::scale(scalar_type c) const {
  // Collect labelled expressions
  std::vector<std::shared_ptr<const lt::letter>> label = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this            = expression_ptr();
  auto lthis = expr_this->attach_letters<N>(label);

  // Execute the operation
  auto scaled = c * lthis;

  auto expr_ptr = std::make_shared<ExpressionTree>(
        scaled.get_expr(), extract_expr_permutation(scaled, label),
        expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::add(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto sum = lthis + lother;

  auto expr_ptr = std::make_shared<ExpressionTree>(
        sum.get_expr(), extract_expr_permutation(sum, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
void TensorImpl<N>::add_linear_combination(
      std::vector<scalar_type> scalars,
      std::vector<std::shared_ptr<Tensor>> tensors) const {
  if (scalars.size() != tensors.size()) {
    throw dimension_mismatch(
          "std::vector of scalars has size " + std::to_string(scalars.size()) +
          ", but passed vector of tensors has size " + std::to_string(tensors.size()));
  }
  if (scalars.size() == 0) return;

  std::unique_ptr<lt::btod_add<N>> operator_ptr;
  for (size_t i = 0; i < scalars.size(); ++i) {
    DIMENSIONALITY_CHECK(tensors[i]);
    if (!operator_ptr) {
      operator_ptr.reset(new lt::btod_add<N>(as_btensor<N>(tensors[i]), scalars[i]));
    } else {
      operator_ptr->add_op(as_btensor<N>(tensors[i]), scalars[i]);
    }
  }
  operator_ptr->perform(*libtensor_ptr(), 1.0);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::multiply(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto mult = lt::expr::mult(lthis, lother);

  auto expr_ptr = std::make_shared<ExpressionTree>(
        mult.get_expr(), extract_expr_permutation(mult, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::divide(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto div = lt::expr::div(lthis, lother);

  auto expr_ptr = std::make_shared<ExpressionTree>(
        div.get_expr(), extract_expr_permutation(div, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::copy() const {
  if (needs_evaluation()) {
    // Return deep copy to the expression
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_expr_ptr);
  } else {
    // Actually make a deep copy of the tensor
    auto ret_ptr = empty_like();
    auto lt_ptr  = std::static_pointer_cast<TensorImpl<N>>(ret_ptr)->libtensor_ptr();
    lt::btod_copy<N>(*libtensor_ptr()).perform(*lt_ptr);
    return ret_ptr;
  }
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::transpose(std::vector<size_t> permutation) const {
  if (permutation.size() != N) {
    throw invalid_argument(
          "Number of indices in provided transposition axes (== " +
          std::to_string(permutation.size()) +
          ") does not agree with tensor dimension (== " + std::to_string(N) + ").");
  }

  // Reorder the axes
  std::vector<AxisInfo> newaxes;
  for (size_t i = 0; i < N; ++i) {
    for (size_t j = 0; j < i; ++j) {
      if (permutation[i] == permutation[j]) {
        throw invalid_argument("Duplicate index in transposition axes (" +
                               std::to_string(permutation[i]) + ") at indices " +
                               std::to_string(i) + " and " + std::to_string(j) + ".");
      }
    }
    if (permutation[i] >= N) {
      throw invalid_argument("Invalid axes specifier " + std::to_string(permutation[i]) +
                             ". Exceeds tensor dimension -1 (==" + std::to_string(N - 1) +
                             ").");
    }

    newaxes.push_back(m_axes[permutation[i]]);
  }

  // Chain permutations
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  std::vector<size_t> result_permutation;
  for (size_t i = 0; i < N; ++i) {
    result_permutation.push_back(expr_this->permutation[permutation[i]]);
  }
  auto expr_ptr = std::make_shared<ExpressionTree>(
        *expr_this->tree_ptr, result_permutation, expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, newaxes, expr_ptr);
}
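
// Illustration: transpose only chains permutations lazily instead of
// evaluating; if the pending permutation is {1, 0, 2} and {2, 1, 0} is
// requested, the stored result permutation becomes
// {perm[2], perm[1], perm[0]} == {2, 0, 1}.
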
namespace {
template <size_t R, size_t N, size_t M>
std::shared_ptr<Tensor> execute_direct_sum(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (dirsum)
  auto res      = lt::dirsum(lfirst, lsecond);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  return std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
}
}  // namespace

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::direct_sum(std::shared_ptr<Tensor> other) const {
  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label_first  = make_label(N);
  lalvec_t label_second = make_label(other->ndim());

  lalvec_t label_result;
  for (auto& v : label_first) label_result.push_back(v);
  for (auto& v : label_second) label_result.push_back(v);
  std::vector<AxisInfo> axes_result;
  for (auto& ax : m_axes) axes_result.push_back(ax);
  for (auto& ax : other->axes()) axes_result.push_back(ax);

  std::shared_ptr<ExpressionTree> expr_first  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_second = as_expression(other);
#define IF_MATCHES_EXECUTE(DIMA, DIMB)                                                   \
  if (DIMA == label_first.size() && DIMB == label_second.size()) {                       \
    constexpr size_t DIMOUT = DIMA + DIMB;                                               \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                          \
                  "Internal error with DIMOUT computation");                             \
    if (DIMOUT != label_result.size()) {                                                 \
      throw runtime_error(                                                               \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()");    \
    }                                                                                    \
    return execute_direct_sum<DIMOUT, DIMA, DIMB>(m_adcmem_ptr, label_result,            \
                                                  label_first, label_second, expr_first, \
                                                  expr_second, axes_result);             \
  }

  IF_MATCHES_EXECUTE(1, 1)  //
  IF_MATCHES_EXECUTE(1, 2)  //
  IF_MATCHES_EXECUTE(1, 3)  //
  IF_MATCHES_EXECUTE(1, 4)  //
  IF_MATCHES_EXECUTE(1, 5)  //
  IF_MATCHES_EXECUTE(2, 1)  //
  IF_MATCHES_EXECUTE(2, 2)  //
  IF_MATCHES_EXECUTE(2, 3)  //
  IF_MATCHES_EXECUTE(2, 4)  //
  IF_MATCHES_EXECUTE(3, 1)  //
  IF_MATCHES_EXECUTE(3, 2)  //
  IF_MATCHES_EXECUTE(3, 3)  //
  IF_MATCHES_EXECUTE(4, 1)  //
  IF_MATCHES_EXECUTE(4, 2)  //
  IF_MATCHES_EXECUTE(5, 1)  //

  throw not_implemented_error(
        "Did not implement the case of a direct_sum of two tensors of dimension " +
        std::to_string(ndim()) + " and " + std::to_string(other->ndim()) + ".");
#undef IF_MATCHES_EXECUTE
}
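
// Illustration: the direct sum of a rank-2 tensor A and a rank-1 tensor B is
// the rank-3 tensor C with C[i, j, k] = A[i, j] + B[k]; the result axes are
// the axes of this tensor followed by the axes of other.
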
template <>
double TensorImpl<1>::trace(std::string) const {
  throw runtime_error("Trace can only be applied to tensors of even rank.");
}

template <>
double TensorImpl<3>::trace(std::string) const {
  throw runtime_error("Trace can only be applied to tensors of even rank.");
}

template <size_t N>
double TensorImpl<N>::trace(std::string contraction) const {
  if (contraction.size() != N) {
    throw invalid_argument(
          "Number of passed contraction indices needs to match tensor dimensionality.");
  }

  std::vector<std::pair<size_t, size_t>> trace_pairs;
  std::vector<bool> index_done(N, false);
  for (size_t i = 0; i < N; ++i) {
    if (index_done[i]) continue;
    index_done[i]   = true;
    bool found_pair = false;
    for (size_t j = i + 1; j < N; ++j) {
      if (contraction[i] == contraction[j]) {
        if (m_axes[i] != m_axes[j]) {
          throw invalid_argument("Axes to be traced along do not agree: " +
                                 m_axes[i].label + " versus " + m_axes[j].label);
        }
        index_done[j] = true;
        trace_pairs.push_back({i, j});
        found_pair = true;
        break;
      }
    }
    if (!found_pair) {
      throw invalid_argument("Found no matching second index for '" +
                             std::string(1, contraction[i]) + "'.");
    }
  }

  if (2 * trace_pairs.size() != N) {
    throw invalid_argument(
          "Expected to find half as many trace indices as there are tensor dimensions, "
          "i.e. " +
          std::to_string(N / 2) + " indices and not " +
          std::to_string(trace_pairs.size()) + ".");
  }

  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label = make_label(N);
  lalvec_t tlal_first;
  lalvec_t tlal_second;
  for (const auto& p : trace_pairs) {
    tlal_first.push_back(label[p.first]);
    tlal_second.push_back(label[p.second]);
  }

  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  auto lfirst                               = expr_this->attach_letters<N>(label);
  constexpr size_t K                        = N / 2;
  return lt::trace(strip_safe<K>(tlal_first), strip_safe<K>(tlal_second), lfirst);
}
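
// Illustration: for a rank-4 tensor t, trace("ijij") pairs index 0 with 2
// and index 1 with 3 and therefore computes sum_{i,j} t[i, j, i, j].
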
namespace {
template <size_t R, size_t K, size_t N, size_t M>
TensorOrScalar execute_tensordot_contract(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_contracted,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  // Build labelled expressions:
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (contract)
  auto res      = lt::contract(strip_safe<K>(label_contracted), lfirst, lsecond);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  auto tensor_ptr = std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
  return TensorOrScalar{tensor_ptr, 0.0};
}

template <size_t R, size_t N, size_t M>
TensorOrScalar execute_tensordot_tensorprod(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (tensor product)
  auto res      = lfirst * lsecond;
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  auto tensor_ptr = std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
  return TensorOrScalar{tensor_ptr, 0.0};
}

}  // namespace

template <size_t N>
TensorOrScalar TensorImpl<N>::tensordot(
      std::shared_ptr<Tensor> other,
      std::pair<std::vector<size_t>, std::vector<size_t>> axes) const {
  const std::vector<size_t>& axes_first  = axes.first;
  const std::vector<size_t>& axes_second = axes.second;

  if (axes_first.size() != axes_second.size()) {
    throw invalid_argument(
          "Length of the passed axes does not agree "
          " (first == " +
          shape_to_string(axes_first) + " and second == " + shape_to_string(axes_second) +
          ")");
  }
  if (axes_first.size() > N) {
    throw invalid_argument(
          "Length of the passed axes overshoots dimensionality of the first "
          "tensor.");
  }
  if (axes_first.size() > other->ndim()) {
    throw invalid_argument(
          "Length of the passed axes overshoots dimensionality of the second "
          "tensor.");
  }

  // Build label for first and second tensor and contraction indices
  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label_first  = make_label(N);
  lalvec_t label_second = make_label(other->ndim());
  lalvec_t label_contracted;
  for (size_t i = 0; i < axes_first.size(); ++i) {
    std::shared_ptr<const lt::letter> l_contraction = label_first[axes_first[i]];
    label_second[axes_second[i]]                    = l_contraction;

    if (m_axes[axes_first[i]] != other->axes()[axes_second[i]]) {
      throw invalid_argument(
            "tensordot can only contract equivalent axes together. The " +
            std::to_string(i) + "-th axis clashes (" + m_axes[axes_first[i]].label +
            " versus " + other->axes()[axes_second[i]].label + "). Tensor spaces are " +
            space() + " and " + other->space());
    }
    label_contracted.push_back(l_contraction);
  }

  // Build labels of the result
  lalvec_t label_result;
  std::vector<AxisInfo> axes_result;
  for (size_t i = 0; i < N; ++i) {
    auto it = std::find(axes_first.begin(), axes_first.end(), i);
    if (it == axes_first.end()) {
      label_result.push_back(label_first[i]);
      axes_result.push_back(m_axes[i]);
    }
  }
  for (size_t j = 0; j < other->ndim(); ++j) {
    auto it = std::find(axes_second.begin(), axes_second.end(), j);
    if (it == axes_second.end()) {
      label_result.push_back(label_second[j]);
      axes_result.push_back(other->axes()[j]);
    }
  }

  if (label_result.size() != N + other->ndim() - 2 * label_contracted.size()) {
    throw runtime_error(
          "Internal error: Result index count does not agree with expected "
          "number.");
  }

  // Build labelled expressions:
  std::shared_ptr<ExpressionTree> expr_first  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_second = as_expression(other);

  // Branch into the different cases
  if (label_result.size() == 0 && N == label_contracted.size() &&
      N == label_first.size() && N == label_second.size()) {
    // Full contraction => execute dot_product

    auto lfirst  = expr_first->attach_letters<N>(label_first);
    auto lsecond = expr_second->attach_letters<N>(label_second);
    return TensorOrScalar{nullptr, lt::dot_product(lfirst, lsecond)};
  } else if (label_contracted.size() == 0) {
#define IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(DIMA, DIMB)                            \
  if (DIMA == label_first.size() && DIMB == label_second.size()) {                    \
    constexpr size_t DIMOUT = DIMB + DIMA;                                            \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                       \
                  "Internal error with DIMOUT computation");                          \
    if (DIMOUT != label_result.size()) {                                              \
      throw runtime_error(                                                            \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()"); \
    }                                                                                 \
    return execute_tensordot_tensorprod<DIMOUT, DIMA, DIMB>(                          \
          m_adcmem_ptr, label_result, label_first, label_second, expr_first,          \
          expr_second, axes_result);                                                  \
  }

    //
    // Instantiation generated from TensorImpl/instantiate_valid.py
    //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(3, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(3, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(3, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(4, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(4, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(5, 1)  //

#undef IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD
  } else {
    // Other cases => normal contraction
#define IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(N_CONTR_IDCS, DIMA, DIMB)                \
  if (DIMA == label_first.size() && DIMB == label_second.size() &&                    \
      N_CONTR_IDCS == label_contracted.size()) {                                      \
    constexpr size_t DIMOUT = DIMB + DIMA - N_CONTR_IDCS - N_CONTR_IDCS;              \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                       \
                  "Internal error with DIMOUT computation");                          \
    if (DIMOUT != label_result.size()) {                                              \
      throw runtime_error(                                                            \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()"); \
    }                                                                                 \
    return execute_tensordot_contract<DIMOUT, N_CONTR_IDCS, DIMA, DIMB>(              \
          m_adcmem_ptr, label_result, label_contracted, label_first, label_second,    \
          expr_first, expr_second, axes_result);                                      \
  }

    //
    // Instantiation generated from TensorImpl/instantiate_valid.py
    //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 5, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 5, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 5, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 6, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 6, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 5, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 5, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 5, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 5, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 6, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 6, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 6, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 3, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 3, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 3, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 5, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 5, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 5, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 5, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 6, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 6, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 6, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 6, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 4, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 4, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 5, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 5, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 5, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 6, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 6, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(4, 6, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(5, 5, 6)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(5, 6, 5)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(5, 6, 6)  //

#undef IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT
  }

  throw not_implemented_error(
        "Did not implement the case of a tensordot over " +
        std::to_string(label_contracted.size()) +
        " indices for two tensors of dimensions " + std::to_string(label_first.size()) +
        " and " + std::to_string(label_second.size()) +
        ", yielding a tensor of dimension " + std::to_string(label_result.size()) + ".");
}
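
// Illustration: contracting a rank-3 tensor A with a rank-2 tensor B over
// axes == {{2}, {0}} returns the rank-3 result
// R[i, j, k] = sum_l A[i, j, l] * B[l, k]. Passing no axes selects the plain
// tensor product branch, while contracting over all indices of both tensors
// returns a scalar via lt::dot_product.
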
template <size_t N>
std::vector<scalar_type> TensorImpl<N>::dot(
      std::vector<std::shared_ptr<Tensor>> tensors) const {
  std::vector<scalar_type> ret(tensors.size(), 0.0);

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto tensor_ptr = std::static_pointer_cast<TensorImpl<N>>(tensors[i]);
    if (ndim() != tensor_ptr->ndim()) {
      throw dimension_mismatch(
            "Dimensionality of this tensor (" + std::to_string(ndim()) +
            ") does not agree with the dimensionality of the " + std::to_string(i) +
            "-th tensor passed, which has dimensionality " +
            std::to_string(tensor_ptr->ndim()) + ".");
    }
    if (shape() != tensor_ptr->shape()) {
      throw dimension_mismatch("Shape of this tensor (" + shape_to_string(shape()) +
                               ") does not agree with the shape of the " +
                               std::to_string(i) + "-th tensor passed, which has shape " +
                               shape_to_string(tensor_ptr->shape()) + ".");
    }

    ret[i] = lt::btod_dotprod<N>(*libtensor_ptr(), *(tensor_ptr->libtensor_ptr()))
                   .calculate();
  }
  return ret;
}
×
1145

1146
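// Illustrative batched use of dot (a sketch; a, b and c are assumed to be
// std::shared_ptr<Tensor> handles of identical shape backed by TensorImpl<2>):
//
//   auto a_impl = std::static_pointer_cast<TensorImpl<2>>(a);
//   std::vector<scalar_type> d = a_impl->dot({b, c});
//   // d[0] holds the full dot product <a|b>, d[1] holds <a|c>.
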
template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::symmetrise(
      const std::vector<std::vector<size_t>>& permutations) const {
  if (permutations.size() == 0) {
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_libtensor_ptr,
                                           m_expr_ptr);  // Noop
  }

  // Label this expression
  auto label                                = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  auto lthis                                = expr_this->attach_letters<N>(label);

  // Execute the operation
  auto symmetrised = [&permutations, &lthis, this, label]() {
    // this assumes that all permutations have the same length
    // but the parse functions throw if this is not the case
    const auto perm_size = permutations.front().size();
    if (perm_size == 2) {
      if (permutations.size() == 1) {
        const auto parsed =
              parse_permutation<2, 1>(m_axes, strip_safe<N>(label), permutations);
        return 0.5 * lt::expr::symm(std::get<0>(parsed), std::get<1>(parsed), lthis);
      } else if (permutations.size() == 2) {
        const auto parsed =
              parse_permutation<2, 2>(m_axes, strip_safe<N>(label), permutations);
        return 0.5 * lt::expr::symm(std::get<0>(parsed), std::get<1>(parsed), lthis);
      } else {
        throw runtime_error(
              "Symmetrisation not implemented for more than two index pairs.");
      }
    } else if (perm_size == 3) {
      if (permutations.size() == 1) {
        const auto parsed =
              parse_permutation<3, 1>(m_axes, strip_safe<N>(label), permutations);
        return 1. / 6. *
               lt::expr::symm(std::get<0>(parsed), std::get<1>(parsed),
                              std::get<2>(parsed), lthis);
      } else {
        throw runtime_error(
              "Symmetrisation not implemented for more than one index triple.");
      }
    } else {
      throw runtime_error(
            "Symmetrisation only implemented for permutations of length 2 and 3.");
    }
  }();

  auto expr_ptr = std::make_shared<ExpressionTree>(
        symmetrised.get_expr(), extract_expr_permutation(symmetrised, label),
        expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

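// Note on the prefactors above (an inference from the scaling, not spelled
// out in libtensor itself): lt::expr::symm appears to sum the expression over
// the generated index permutations, so the 1/2 (index pairs) and 1/6 (index
// triples, 3! terms) factors rescale the symmetrised sum to an average.
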
template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::antisymmetrise(
      const std::vector<std::vector<size_t>>& permutations) const {
  if (permutations.size() == 0) {
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_libtensor_ptr,
                                           m_expr_ptr);  // Noop
  }

  // Label this expression
  auto label                                = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  auto lthis                                = expr_this->attach_letters<N>(label);

  // Execute the operation
  auto antisymmetrised = [&permutations, &lthis, this, &label]() {
    // this assumes that all permutations have the same length
    // but the parse functions throw if this is not the case
    const auto perm_size = permutations.front().size();
    if (perm_size == 2) {
      if (permutations.size() == 1) {
        const auto parsed =
              parse_permutation<2, 1>(m_axes, strip_safe<N>(label), permutations);
        return 0.5 * lt::expr::asymm(std::get<0>(parsed), std::get<1>(parsed), lthis);
      } else if (permutations.size() == 2) {
        const auto parsed =
              parse_permutation<2, 2>(m_axes, strip_safe<N>(label), permutations);
        return 0.5 * lt::expr::asymm(std::get<0>(parsed), std::get<1>(parsed), lthis);
      } else {
        throw runtime_error(
              "Antisymmetrisation not implemented for more than two index pairs.");
      }
    } else if (perm_size == 3) {
      if (permutations.size() == 1) {
        const auto parsed =
              parse_permutation<3, 1>(m_axes, strip_safe<N>(label), permutations);
        return 1. / 6. *
               lt::expr::asymm(std::get<0>(parsed), std::get<1>(parsed),
                               std::get<2>(parsed), lthis);
      } else {
        throw runtime_error(
              "Antisymmetrisation not implemented for more than one index triple.");
      }
    } else {
      throw runtime_error(
            "Antisymmetrisation only implemented for permutations of length 2 and 3.");
    }
  }();

  auto expr_ptr = std::make_shared<ExpressionTree>(
        antisymmetrised.get_expr(), extract_expr_permutation(antisymmetrised, label),
        expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

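// Illustrative (a sketch): for a 2-index tensor handle t,
// t->antisymmetrise({{0, 1}}) builds the lazy expression
// 0.5 * (t_{ij} - t_{ji}); like symmetrise, no evaluation happens until the
// returned expression-backed tensor is actually used.
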
template <size_t N>
void TensorImpl<N>::set_element(const std::vector<size_t>& tidx, scalar_type value) {
  if (!is_element_allowed(tidx)) {
    throw runtime_error("Setting tensor index (" + shape_to_string(tidx) +
                        ") not allowed, since zero by symmetry.");
  }

  lt::index<N> block_idx;
  lt::index<N> in_block_idx;
  std::tie(block_idx, in_block_idx) = assert_convert_tensor_index(*libtensor_ptr(), tidx);
  lt::btod_set_elem<N>{}.perform(*libtensor_ptr(), block_idx, in_block_idx, value);
}

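// assert_convert_tensor_index splits a global tensor index into the index of
// the containing block and the offset within that block; set_element above
// and get_element below both rely on this decomposition.
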
template <size_t N>
scalar_type TensorImpl<N>::get_element(const std::vector<size_t>& tidx) const {
  lt::index<N> block_idx;
  lt::index<N> in_block_idx;
  std::tie(block_idx, in_block_idx) = assert_convert_tensor_index(*libtensor_ptr(), tidx);

  // The value we want to return
  scalar_type ret;

  // Make an orbit over the block index, i.e. find the canonical block
  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  lt::dimensions<N> bidims(libtensor_ptr()->get_bis().get_block_index_dims());
  lt::orbit<N, scalar_type> obit(ctrl.req_const_symmetry(), block_idx);

  // If the orbit (i.e. the index) is not allowed by symmetry, then the
  // element is zero.
  if (!obit.is_allowed()) return 0;

  // Absolute index of the canonical block
  const lt::abs_index<N> idx_abscanon(obit.get_acindex(), bidims);

  // Check if we have hit a zero block, then return 0 as well.
  if (ctrl.req_is_zero_block(idx_abscanon.get_index())) return 0;

  // The transformation between the selected block and the canonical block
  const lt::tensor_transf<N, scalar_type>& tr = obit.get_transf(block_idx);

  // Transform in-block index.
  lt::index<N> pibidx(in_block_idx);  ///< permuted in-block index
  pibidx.permute(tr.get_perm());

  // Get the actual value from the dense tensor representing the block
  {
    auto& block = ctrl.req_const_block(idx_abscanon.get_index());
    lt::dense_tensor_rd_ctrl<N, scalar_type> blkctrl(block);

    const scalar_type* p = blkctrl.req_const_dataptr();
    ret                  = p[lt::abs_index<N>(pibidx, block.get_dims()).get_abs_index()];
    blkctrl.ret_const_dataptr(p);
  }

  // Transform value
  tr.get_scalar_tr().apply(ret);

  ctrl.ret_const_block(idx_abscanon.get_index());
  return ret;
}

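// A concrete sketch of the lookup above (assuming an antisymmetric 2-index
// tensor whose upper-triangle block is the canonically stored one):
// requesting element (j, i) with j > i resolves via the orbit to the
// canonical block of (i, j), the in-block index is permuted accordingly, and
// the scalar transformation applies the sign, so the value returned is
// -T(i, j).
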
template <size_t N>
bool TensorImpl<N>::is_element_allowed(const std::vector<size_t>& tidx) const {
  lt::index<N> block_idx;
  std::tie(block_idx, std::ignore) = assert_convert_tensor_index(*libtensor_ptr(), tidx);

  // Check if the block is allowed in the symmetry of the tensor
  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  lt::orbit<N, scalar_type> orb(ctrl.req_const_symmetry(), block_idx);
  const bool block_allowed = orb.is_allowed();

  // TODO Even if the block is allowed, the index might not be
  //      (e.g. diagonal of an anti-symmetric tensor).
  const bool index_allowed = true;

  return block_allowed && index_allowed;
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_absmax(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4absmax>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_absmin(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4absmin>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_max(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4max>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_min(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4min>(*libtensor_ptr(), n, unique_by_symmetry);
}

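// The four variants above differ only in the libtensor comparator passed to
// execute_select_n. Illustrative (a sketch): t.select_n_absmax(3, true)
// returns the three symmetry-unique elements of largest magnitude as
// (tensor index, value) pairs.
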
template <size_t N>
void TensorImpl<N>::export_to(scalar_type* memptr, size_t size) const {
  if (this->size() != size) {
    throw invalid_argument("The memory size provided (== " + std::to_string(size) +
                           ") does not agree with the number of tensor elements (== " +
                           std::to_string(this->size()) + ")");
  }
  lt::btod_export<N>(*libtensor_ptr()).perform(memptr);
}

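// Note: btod_export writes the full dense tensor, i.e. symmetry-equivalent
// and zero blocks are expanded, so memptr must provide space for size()
// scalar_type entries in the same row-major layout assumed by the importer
// below.
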
template <size_t N>
void TensorImpl<N>::import_from(const scalar_type* memptr, size_t size,
                                scalar_type tolerance, bool symmetry_check) {
  if (this->size() != size) {
    throw invalid_argument("The memory size provided (== " + std::to_string(size) +
                           ") does not agree with the number of tensor elements (== " +
                           std::to_string(this->size()) + ")");
  }

  if (symmetry_check) {
    // Slow algorithm with symmetry check (via libtensor)

    // Zero this memory.
    // TODO Check whether this really needs to be done for proper functioning.
    //      There is some indication that yes, unfortunately.
    lt::btod_set<N>(0.0).perform(*libtensor_ptr());

    scalar_type* noconst_mem = const_cast<scalar_type*>(memptr);
    libtensor::btod_import_raw<N>(noconst_mem, libtensor_ptr()->get_bis().get_dims(),
                                  tolerance)
          .perform(*libtensor_ptr());
  } else {
    // Fast algorithm without symmetry check (via import_from with generator)
    auto fast_importer = [this, memptr](
                               const std::vector<std::pair<size_t, size_t>>& range,
                               scalar_type* ptr) {
      if (range.size() != N) {
        throw runtime_error("Internal error: Dimension mismatch in fast_importer");
      }

      // Strides for accessing the full memptr
      std::array<size_t, N> strides_full;
      strides_full[N - 1] = 1;
      for (size_t idim = N - 1; idim > 0; --idim) {
        strides_full[idim - 1] = strides_full[idim] * shape()[idim];
      }

      // Dimensionalities for access into the small ptr
      std::array<size_t, N> dim_ptr;
      for (size_t idim = 0; idim != N; ++idim) {
        dim_ptr[idim] = range[idim].second - range[idim].first;
      }

      // Strides for accessing the small ptr
      std::array<size_t, N> strides_ptr;
      strides_ptr[N - 1] = 1;
      for (size_t idim = N - 1; idim > 0; --idim) {
        strides_ptr[idim - 1] = strides_ptr[idim] * dim_ptr[idim];
      }
      const size_t size_ptr = strides_ptr[0] * dim_ptr[0];

      for (size_t iabs = 0; iabs < size_ptr; ++iabs) {
        size_t full_abs = 0;
        for (size_t idim = 0; idim != N; ++idim) {
          size_t idx = range[idim].first + (iabs / strides_ptr[idim]) % dim_ptr[idim];
          full_abs += strides_full[idim] * idx;
        }
        ptr[iabs] = memptr[full_abs];
      }
    };
    import_from(fast_importer, tolerance, false);
  }
}

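// Worked stride example for the copy loop above (a sketch for N == 3 and
// shape (n0, n1, n2)): strides_full == (n1*n2, n2, 1), so the flat offset of
// index (i, j, k) is i*n1*n2 + j*n2 + k. The same row-major construction over
// the block extents dim_ptr yields strides_ptr, and the division/modulus
// arithmetic in the loop converts the flat in-block position iabs back into a
// multi-index before mapping it onto the full array.
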
template <size_t N>
void TensorImpl<N>::import_from(
      std::function<void(const std::vector<std::pair<size_t, size_t>>&, scalar_type*)>
            generator,
      scalar_type tolerance, bool symmetry_check) {

  if (symmetry_check) {
    // Slow algorithm with symmetry check (via import_from with libtensor)

    // Get complete data via generator ...
    std::vector<std::pair<size_t, size_t>> full_range(N);
    for (size_t i = 0; i < N; ++i) {
      full_range[i].first  = 0;
      full_range[i].second = shape()[i];
    }
    std::vector<scalar_type> data(this->size());
    generator(full_range, data.data());

    // ... and import it
    import_from(data, tolerance, true);
  } else {
    // Fast algorithm by only considering canonical blocks
    // First zero out the tensor completely.
    lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
    ctrl.req_zero_all_blocks();

    // Now loop over all orbits, which thus gives access to all
    // canonical blocks
    lt::orbit_list<N, scalar_type> orbitlist(ctrl.req_const_symmetry());
    lt::index<N> blk_idx;  // Holder for canonical-block indices
    for (auto it = orbitlist.begin(); it != orbitlist.end(); ++it) {
      orbitlist.get_index(it, blk_idx);

      // Get the dense tensor for the current canonical block
      // and import the data from the generator
      lt::dense_tensor_wr_i<N, scalar_type>& blk = ctrl.req_block(blk_idx);

      // Compute range to import
      std::vector<std::pair<size_t, size_t>> range(N);
      const lt::block_index_space<N>& bis = libtensor_ptr()->get_bis();
      lt::index<N> blk_start(bis.get_block_start(blk_idx));
      lt::dimensions<N> blk_dims(bis.get_block_dims(blk_idx));
      for (size_t i = 0; i < N; ++i) {
        range[i].first  = blk_start[i];
        range[i].second = blk_start[i] + blk_dims[i];
      }

      bool all_zero = false;
      {
        lt::dense_tensor_wr_ctrl<N, scalar_type> cblk(blk);
        cblk.req_prefetch();
        scalar_type* ptr = cblk.req_dataptr();

        generator(range, ptr);  // Import data from the generator

        if (tolerance > 0) {
          // Check whether all imported elements are numerically zero
          all_zero = std::all_of(
                ptr, ptr + blk_dims.get_size(),
                [tolerance](scalar_type e) { return std::abs(e) < tolerance; });
        }

        cblk.ret_dataptr(ptr);
      }
      ctrl.ret_block(blk_idx);

      if (all_zero) {
        ctrl.req_zero_block(blk_idx);
      }
    }
  }
}

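// Sketch of the generator contract assumed above: the callback receives one
// half-open range [first, second) per dimension, describing a single
// canonical block, and must fill the provided buffer with that block's
// elements in row-major order. Blocks whose imported elements all fall below
// the tolerance are flagged as zero blocks instead of being stored.
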
template <size_t N>
std::string TensorImpl<N>::describe_symmetry() const {
  std::stringstream ss;

  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  ss << ctrl.req_const_symmetry();

  return ss.str();
}

template <size_t N>
std::string TensorImpl<N>::describe_expression(std::string stage) const {
  if (needs_evaluation()) {
    std::stringstream ss;
    if (stage == "unoptimised") {
      ss << *m_expr_ptr;
    } else if (stage == "optimised") {
      lt::expr::print_tree(m_expr_ptr->optimised_tree(), ss, 2);
    } else if (stage == "evaluation") {
      auto newtensor_ptr =
            std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(m_axes));
      lt::expr::expr_tree evaltree =
            m_expr_ptr->evaluation_tree(*newtensor_ptr, /* add = */ false);
      lt::expr::print_tree(evaltree, ss, 2);
    } else {
      throw invalid_argument("Stage " + stage +
                             " not valid for describe_expression. Try 'unoptimised', "
                             "'optimised' or 'evaluation'.");
    }
    return ss.str();
  } else {
    return "btensor of shape " + shape_to_string(m_shape);
  }
}

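// The three stages expose the lazy-evaluation pipeline: "unoptimised" prints
// the expression tree as built, "optimised" the tree after libtensor's
// optimisation pass, and "evaluation" the tree as it would be evaluated into
// a freshly allocated btensor.
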
template <size_t N>
std::shared_ptr<ExpressionTree> TensorImpl<N>::expression_ptr() const {
  if (m_expr_ptr != nullptr) {
    if (m_libtensor_ptr != nullptr) {
      throw runtime_error(
            "Internal error: m_libtensor_ptr is not a nullptr and neither is "
            "m_expr_ptr.");
    }
    return m_expr_ptr;  // Already have an expression for myself, just return it
  } else {
    if (m_libtensor_ptr == nullptr) {
      throw runtime_error(
            "Internal error: Both m_libtensor_ptr and m_expr_ptr are nullptrs.");
    }

    // Make a new expression tree with just this tensor in it
    return std::make_shared<ExpressionTree>(
          lt::expr::node_ident_any_tensor<N, scalar_type>(*m_libtensor_ptr),
          identity_permutation(N), std::vector<std::shared_ptr<void>>{m_libtensor_ptr});
  }
}

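// Invariant enforced above: a TensorImpl holds either an evaluated block
// tensor (m_libtensor_ptr) or an unevaluated expression (m_expr_ptr), never
// both. When a fresh single-tensor expression is built, the keepalives vector
// keeps the backing btensor alive for as long as the returned expression
// references it.
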
std::shared_ptr<ExpressionTree> as_expression(const std::shared_ptr<Tensor>& tensor) {
  std::shared_ptr<ExpressionTree> ret;
  if (tensor->ndim() == 1) {
    ret = std::static_pointer_cast<TensorImpl<1>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 2) {
    ret = std::static_pointer_cast<TensorImpl<2>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 3) {
    ret = std::static_pointer_cast<TensorImpl<3>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 4) {
    ret = std::static_pointer_cast<TensorImpl<4>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 5) {
    ret = std::static_pointer_cast<TensorImpl<5>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 6) {
    ret = std::static_pointer_cast<TensorImpl<6>>(tensor)->expression_ptr();
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 6.");
  }

  if (ret->permutation.size() != tensor->ndim()) {
    throw runtime_error("Internal error: Mismatch between permutation.size() == " +
                        std::to_string(ret->permutation.size()) +
                        " and tensor dimensionality " + std::to_string(tensor->ndim()) +
                        ".");
  }

  return ret;
}

template <size_t N>
lt::btensor<N, scalar_type>& as_btensor(const std::shared_ptr<Tensor>& in) {
  if (N != in->ndim()) {
    throw dimension_mismatch("Requested dimensionality " + std::to_string(N) +
                             ", but passed tensor has dimensionality " +
                             std::to_string(in->ndim()));
  }
  return static_cast<lt::btensor<N, scalar_type>&>(
        *std::static_pointer_cast<TensorImpl<N>>(in));
}

template <size_t N>
std::shared_ptr<lt::btensor<N, scalar_type>> as_btensor_ptr(
      const std::shared_ptr<Tensor>& in) {
  if (N != in->ndim()) {
    throw dimension_mismatch("Requested dimensionality " + std::to_string(N) +
                             ", but passed tensor has dimensionality " +
                             std::to_string(in->ndim()));
  }
  return std::static_pointer_cast<TensorImpl<N>>(in)->libtensor_ptr();
}

namespace {
template <size_t N>
std::shared_ptr<Tensor> make_tensor_inner(std::shared_ptr<Symmetry> symmetry) {
  auto ltsym_ptr = as_lt_symmetry<N>(*symmetry);
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(ltsym_ptr->get_bis());

  lt::block_tensor_ctrl<N, scalar_type> ctrl_to(*newtensor_ptr);
  lt::so_copy<N, scalar_type>(*ltsym_ptr).perform(ctrl_to.req_symmetry());
  return std::make_shared<TensorImpl<N>>(symmetry->adcmem_ptr(), symmetry->axes(),
                                         std::move(newtensor_ptr));
}
}  // namespace

std::shared_ptr<Tensor> make_tensor(std::shared_ptr<Symmetry> symmetry) {
  if (symmetry->ndim() == 1) {
    return make_tensor_inner<1>(symmetry);
  } else if (symmetry->ndim() == 2) {
    return make_tensor_inner<2>(symmetry);
  } else if (symmetry->ndim() == 3) {
    return make_tensor_inner<3>(symmetry);
  } else if (symmetry->ndim() == 4) {
    return make_tensor_inner<4>(symmetry);
  } else if (symmetry->ndim() == 5) {
    return make_tensor_inner<5>(symmetry);
  } else if (symmetry->ndim() == 6) {
    return make_tensor_inner<6>(symmetry);
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 6.");
  }
}

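// Illustrative use (a sketch; sym is assumed to be a valid
// std::shared_ptr<Symmetry> describing a 4-index object):
//
//   std::shared_ptr<Tensor> t = make_tensor(sym);
//   // t is a rank-4 block tensor whose block-index space and symmetry group
//   // are copied from sym via so_copy; presumably no blocks are populated
//   // yet, so the tensor starts out zero.
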
std::shared_ptr<Tensor> make_tensor(std::shared_ptr<const AdcMemory> adcmem_ptr,
                                    std::vector<AxisInfo> axes) {
  if (axes.size() == 1) {
    return std::make_shared<TensorImpl<1>>(adcmem_ptr, axes);
  } else if (axes.size() == 2) {
    return std::make_shared<TensorImpl<2>>(adcmem_ptr, axes);
  } else if (axes.size() == 3) {
    return std::make_shared<TensorImpl<3>>(adcmem_ptr, axes);
  } else if (axes.size() == 4) {
    return std::make_shared<TensorImpl<4>>(adcmem_ptr, axes);
  } else if (axes.size() == 5) {
    return std::make_shared<TensorImpl<5>>(adcmem_ptr, axes);
  } else if (axes.size() == 6) {
    return std::make_shared<TensorImpl<6>>(adcmem_ptr, axes);
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 6.");
  }
}

//
// Explicit instantiation
//

#define INSTANTIATE(DIM)                                                                 \
  template class TensorImpl<DIM>;                                                        \
                                                                                         \
  template lt::btensor<DIM, scalar_type>& as_btensor(const std::shared_ptr<Tensor>& in); \
                                                                                         \
  template std::shared_ptr<lt::btensor<DIM, scalar_type>> as_btensor_ptr(                \
        const std::shared_ptr<Tensor>& in);

INSTANTIATE(1)
INSTANTIATE(2)
INSTANTIATE(3)
INSTANTIATE(4)
INSTANTIATE(5)
INSTANTIATE(6)

#undef INSTANTIATE

}  // namespace libadcc