adc-connect / adcc — build 4899696005 (push, via GitHub), pending completion

Maximilian Scheurer: "Bump version: 0.15.15 → 0.15.16"

Branch coverage: 1488 of 2311 branches covered (64.39%); branch coverage is included in the aggregate %.
Line coverage: 7144 of 9343 relevant lines covered (76.46%), at 291077.06 hits per line.
New code: 1 of 1 new or added line in 1 file covered (100.0%).

Source File: /libadcc/TensorImpl.cc — 80.54% covered
//
// Copyright (C) 2020 by the adcc authors
//
// This file is part of adcc.
//
// adcc is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// adcc is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with adcc. If not, see <http://www.gnu.org/licenses/>.
//

#include "TensorImpl.hh"
#include "TensorImpl/as_bispace.hh"
#include "TensorImpl/as_lt_symmetry.hh"
#include "TensorImpl/get_block_starts.hh"
#include "shape_to_string.hh"

// Change visibility of libtensor singletons to public
#pragma GCC visibility push(default)
#include <libtensor/block_tensor/btod_add.h>
#include <libtensor/block_tensor/btod_copy.h>
#include <libtensor/block_tensor/btod_dotprod.h>
#include <libtensor/block_tensor/btod_export.h>
#include <libtensor/block_tensor/btod_random.h>
#include <libtensor/block_tensor/btod_select.h>
#include <libtensor/block_tensor/btod_set.h>
#include <libtensor/block_tensor/btod_set_diag.h>
#include <libtensor/block_tensor/btod_set_elem.h>
#include <libtensor/symmetry/print_symmetry.h>
#pragma GCC visibility pop

namespace libadcc {
namespace lt = libtensor;

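// Validate that OTHER (a shared_ptr to another Tensor) agrees with this tensor
// in dimensionality, shape and axis labels, throwing dimension_mismatch otherwise.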
#define DIMENSIONALITY_CHECK(OTHER)                                                    \
  {                                                                                    \
    if (ndim() != OTHER->ndim()) {                                                     \
      throw dimension_mismatch(                                                        \
            "Dimensionality of this tensor (" + std::to_string(ndim()) +               \
            ") does not agree with the dimensionality of the other tensor"             \
            " passed, which has dimensionality " +                                     \
            std::to_string(OTHER->ndim()) + ".");                                      \
    }                                                                                  \
    if (shape() != OTHER->shape()) {                                                   \
      throw dimension_mismatch("Shape of this tensor (" + shape_to_string(shape()) +   \
                               ") does not agree with the shape of the other tensor" + \
                               " passed, which has shape " +                           \
                               shape_to_string(OTHER->shape()) + ".");                 \
    }                                                                                  \
    if (axes() != OTHER->axes()) {                                                     \
      throw dimension_mismatch("Axes of this tensor (" + axes_to_string(axes()) +      \
                               ") do not agree with the axes of the other tensor "     \
                               "passed, which has axis labels " +                      \
                               axes_to_string(OTHER->axes()) + ".");                   \
    }                                                                                  \
  }

namespace {
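/** Concatenate the labels of the passed axes into a single string
 *  (used in error messages) */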
std::string axes_to_string(const std::vector<AxisInfo>& axes) {
  std::string res = "";
  for (auto& ax : axes) res.append(ax.label);
  return res;
}

/** Build an identity permutation of length n */
std::vector<size_t> identity_permutation(size_t n) {
  std::vector<size_t> permutation;
  for (size_t i = 0; i < n; ++i) {
    permutation.push_back(i);
  }
  return permutation;
}

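/** Convert a vector of shared-pointer letters into a libtensor expression
 *  label of raw letter pointers; the caller keeps the letters alive. */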
template <size_t N>
lt::expr::label<N> strip_safe(const std::vector<std::shared_ptr<const lt::letter>>& in) {
  std::vector<const lt::letter*> label_unsafe;
  for (size_t i = 0; i < in.size(); ++i) {
    label_unsafe.push_back(in[i].get());
  }
  return lt::expr::label<N>(label_unsafe);
}

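/** Determine the permutation mapping the label of an expression onto the
 *  passed reference label and return it as an index vector. */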
template <size_t N>
std::vector<size_t> extract_expr_permutation(
      const lt::expr::expr_rhs<N, scalar_type>& expr,
      const std::vector<std::shared_ptr<const lt::letter>>& label) {
  std::vector<size_t> permutation;
  lt::permutation<N> perm = expr.get_label().permutation_of(strip_safe<N>(label));
  perm.invert();
  for (size_t i = 0; i < N; ++i) {
    permutation.push_back(perm[i]);
  }
  return permutation;
}

/** Merge two keepalive lists */
std::vector<std::shared_ptr<void>> merge(std::vector<std::shared_ptr<void>> lhs,
                                         const std::vector<std::shared_ptr<void>>& rhs) {
  for (auto& ptr : rhs) lhs.push_back(ptr);
  return lhs;
}

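/** Create n fresh letters for labelling the axes of a tensor expression */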
std::vector<std::shared_ptr<const lt::letter>> make_label(size_t n) {
  std::vector<std::shared_ptr<const lt::letter>> ret;
  for (size_t i = 0; i < n; ++i) {
    ret.push_back(std::make_shared<lt::letter>());
  }
  return ret;
}

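/** Translate a list of index-pair permutations into the two letter sets
 *  expected by libtensor's symm/asymm expressions, validating that the pairs
 *  are disjoint, in range and over equivalent axes. */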
template <size_t M, size_t N>
std::pair<lt::expr::label<M>, lt::expr::label<M>> parse_permutation(
      const std::vector<AxisInfo>& axes, const lt::expr::label<N>& label,
      const std::vector<std::vector<size_t>>& permutations) {
  std::vector<const lt::letter*> set1;
  std::vector<const lt::letter*> set2;

  std::vector<size_t> processed_indices;
  for (const auto& perm : permutations) {
    if (perm.size() < 2) {
      throw invalid_argument("A permutation tuple has to have 2 or more indices.");
    } else if (perm.size() == 2) {
      if (perm[0] == perm[1]) {
        throw invalid_argument(
              "A permutation tuple cannot have duplicate indices. Here " +
              std::to_string(perm[0]) + " is a duplicate.");
      }
      auto find_0 =
            std::find(processed_indices.begin(), processed_indices.end(), perm[0]);
      auto find_1 =
            std::find(processed_indices.begin(), processed_indices.end(), perm[1]);

      if (find_0 != processed_indices.end() or find_1 != processed_indices.end()) {
        throw invalid_argument(
              "Provided index tuples in a permutation list have to be disjoint.");
      }
      if (perm[0] >= N || perm[1] >= N) {
        throw invalid_argument(
              "Index in permutation list cannot be larger than dimension.");
      }
      if (axes[perm[0]] != axes[perm[1]]) {
        throw invalid_argument(
              "(Anti)-Symmetrisation can only be performed over equivalent axes (not '" +
              axes[perm[0]].label + "' and '" + axes[perm[1]].label + "').");
      }

      set1.push_back(&label.letter_at(perm[0]));
      set2.push_back(&label.letter_at(perm[1]));

      processed_indices.push_back(perm[0]);
      processed_indices.push_back(perm[1]);
    } else {
      throw not_implemented_error(
            "Permutations for tuple lengths larger than 2 are not implemented.");
    }
  }
  return {lt::expr::label<M>(set1), lt::expr::label<M>(set2)};
}

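/** Check a full tensor index against the tensor dimensions and convert it
 *  into the pair (block index, in-block index) of the underlying block
 *  tensor. */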
template <size_t N, typename T>
std::pair<lt::index<N>, lt::index<N>> assert_convert_tensor_index(
      lt::btensor<N, T>& tensor, const std::vector<size_t>& idx) {
  if (idx.size() != N) {
    throw dimension_mismatch("Tensor is of dimension " + std::to_string(N) +
                             ", but passed index has a dimension of " +
                             std::to_string(idx.size()) + ".");
  }
  const lt::dimensions<N>& dims = tensor.get_bis().get_dims();
  for (size_t i = 0; i < N; ++i) {
    if (idx[i] >= dims[i]) {
      throw invalid_argument("Passed index " + shape_to_string(idx) +
                             " overshoots Tensor at dimension " + std::to_string(i) +
                             " (with extent: " + std::to_string(dims[i]) + ")");
    }
  }

  lt::index<N> block_idx;
  for (size_t idim = 0; idim < N; ++idim) {
    // Find splits
    const size_t dim_type     = tensor.get_bis().get_type(idim);
    const lt::split_points sp = tensor.get_bis().get_splits(dim_type);
    block_idx[idim]           = 0;
    for (size_t isp = 0; isp < sp.get_num_points(); ++isp) {
      if (sp[isp] > idx[idim]) break;
      block_idx[idim] = isp + 1;
    }
  }

  lt::index<N> inblock_idx;
  lt::index<N> bstart    = tensor.get_bis().get_block_start(block_idx);
  lt::dimensions<N> bdim = tensor.get_bis().get_block_dims(block_idx);
  for (size_t idim = 0; idim < N; ++idim) {
    inblock_idx[idim] = idx[idim] - bstart[idim];
    if (inblock_idx[idim] >= bdim[idim]) {
      throw runtime_error(
            "Internal error: Determined in-block index overshoots block dimensionality");
    }
  }

  return std::make_pair(block_idx, inblock_idx);
}

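/** Run btod_select with the given Comparator over the tensor and return the
 *  n selected elements as (full index, value) pairs. If unique_by_symmetry is
 *  false, the tensor symmetry is ignored and symmetry-equivalent duplicates
 *  may be returned. */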
template <typename Comparator, size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> execute_select_n(
      lt::btensor<N, scalar_type>& tensor, size_t n, bool unique_by_symmetry) {
  using btod_select_t = lt::btod_select<N, Comparator>;
  std::list<lt::block_tensor_element<N, scalar_type>> il;
  if (!unique_by_symmetry) {
    lt::symmetry<N, scalar_type> nosym(tensor.get_bis());
    btod_select_t(tensor, nosym).perform(il, n);
  } else {
    btod_select_t(tensor).perform(il, n);
  }

  std::vector<std::pair<std::vector<size_t>, scalar_type>> ret;
  for (auto it = il.begin(); it != il.end(); ++it) {
    std::vector<size_t> fidx(N);
    const lt::index<N> bstart = tensor.get_bis().get_block_start(it->get_block_index());
    for (size_t i = 0; i < N; ++i) {
      fidx[i] = bstart[i] + it->get_in_block_index()[i];
    }
    ret.emplace_back(fidx, it->get_value());
  }
  return ret;
}
}  // namespace

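// Internal consistency check: exactly one of m_expr_ptr and m_libtensor_ptr
// may be set, and the cached shape, axes and block starts have to agree with
// the underlying libtensor object.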
template <size_t N>
void TensorImpl<N>::check_state() const {
  if (m_expr_ptr == nullptr && m_libtensor_ptr == nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be nullptr.");
  }
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }

  if (N != ndim()) {
    throw runtime_error("Internal error: libtensor dimension (== " + std::to_string(N) +
                        ") and tensor dimension (==" + std::to_string(ndim()) +
                        ") differ.");
  }

  if (m_libtensor_ptr) {
    std::vector<size_t> btshape(N);
    const lt::dimensions<N>& dims = m_libtensor_ptr->get_bis().get_dims();
    for (size_t i = 0; i < N; ++i) {
      btshape[i] = dims.get_dim(i);
    }
    if (shape() != btshape) {
      throw runtime_error(
            "Internal error: libtensor shape (== " + shape_to_string(btshape) +
            ") and tensor shape (==" + shape_to_string(shape()) + ") differ.");
    }

    const std::vector<std::vector<size_t>> tensorblocks =
          get_block_starts(*m_libtensor_ptr);
    for (size_t i = 0; i < N; ++i) {
      if (axes()[i].block_starts != tensorblocks[i]) {
        throw runtime_error("Internal error: Block starts of btensor " +
                            shape_to_string(tensorblocks[i]) + " at dimension " +
                            std::to_string(i) +
                            " do not agree with the cached block starts " +
                            shape_to_string(axes()[i].block_starts) + ".");
      }
    }
  }

  if (m_expr_ptr) {
    if (m_expr_ptr->permutation.size() != N) {
      throw runtime_error(
            "Internal error: Expression dimension (== " + std::to_string(N) +
            ") and tensor dimension (==" + std::to_string(ndim()) + ") differ.");
    }
  }
}

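// The two reset_state overloads swap the internal state: adopting an evaluated
// block tensor drops any pending expression, adopting an expression tree drops
// the evaluated tensor; check_state() verifies the outcome.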
template <size_t N>
void TensorImpl<N>::reset_state(
      std::shared_ptr<lt::btensor<N, scalar_type>> libtensor_ptr) const {
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }
  if (libtensor_ptr == nullptr) {
    throw runtime_error(
          "Internal error: libtensor_ptr to be used for reset_state is a nullptr.");
  }
  m_libtensor_ptr = libtensor_ptr;
  m_expr_ptr.reset();
  check_state();
}

template <size_t N>
void TensorImpl<N>::reset_state(std::shared_ptr<ExpressionTree> expr_ptr) const {
  if (m_expr_ptr != nullptr && m_libtensor_ptr != nullptr) {
    throw runtime_error(
          "Internal error: m_libtensor_ptr and m_expr_ptr cannot both be set pointers.");
  }
  if (expr_ptr == nullptr) {
    throw runtime_error(
          "Internal error: expr_ptr to be used for reset_state is a nullptr.");
  }
  m_expr_ptr = expr_ptr;
  m_libtensor_ptr.reset();
  check_state();
}

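// Construct a TensorImpl from either an evaluated block tensor or a lazy
// expression tree (at most one of the two may be passed); if neither is given,
// an empty tensor is allocated from the block-index space of the axes.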
template <size_t N>
319
TensorImpl<N>::TensorImpl(std::shared_ptr<const AdcMemory> adcmem_ptr,
3,253,897✔
320
                          std::vector<AxisInfo> axes,
321
                          std::shared_ptr<lt::btensor<N, scalar_type>> libtensor_ptr,
322
                          std::shared_ptr<ExpressionTree> expr_ptr)
323
      : Tensor(adcmem_ptr, axes), m_libtensor_ptr(nullptr), m_expr_ptr(nullptr) {
3,253,897✔
324
  if (axes.size() != N) {
2,443,393✔
325
    throw invalid_argument("axes length (== " + std::to_string(axes.size()) +
×
326
                           ") does not agree with tensor dimensionality " +
327
                           std::to_string(N));
328
  }
329

330
  if (expr_ptr != nullptr && libtensor_ptr != nullptr) {
2,443,393✔
331
    throw invalid_argument("libtensor_ptr and expr_ptr cannot both be set pointers.");
×
332
  }
333
  if (expr_ptr == nullptr && libtensor_ptr == nullptr) {
2,443,393✔
334
    // Allocate an empty tensor.
335
    libtensor_ptr = std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(axes));
126✔
336
  }
42✔
337

338
  if (expr_ptr != nullptr) reset_state(expr_ptr);
2,443,393✔
339
  if (libtensor_ptr != nullptr) reset_state(libtensor_ptr);
2,443,393✔
340
}
3,253,897✔
341

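// The arithmetic members below (scale, add, multiply, tensordot, ...) are lazy:
// they only build up an ExpressionTree and defer all numerical work until
// evaluate() materialises the result into a block tensor. A minimal usage
// sketch (hypothetical tensors a and b over identical axes):
//
//   std::shared_ptr<Tensor> c = a->add(b)->scale(2.0);  // no data touched yet
//   c->evaluate();  // allocates the output tensor and runs the expression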
template <size_t N>
void TensorImpl<N>::evaluate() const {
  check_state();
  if (!needs_evaluation()) return;

  // Allocate output tensor and evaluate
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(m_axes));
  m_expr_ptr->evaluate_to(*newtensor_ptr, /* add = */ false);

  // Check and test new tensor, cleanup expression
  reset_state(newtensor_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::empty_like() const {
  check_state();

  // TODO This evaluates the expression, which is probably an unexpected effect.

  // Create new btensor using the old bispace
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(libtensor_ptr()->get_bis());

  // Copy the symmetry over
  lt::block_tensor_ctrl<N, scalar_type> ctrl_to(*newtensor_ptr);
  lt::block_tensor_ctrl<N, scalar_type> ctrl_from(*libtensor_ptr());
  lt::so_copy<N, scalar_type>(ctrl_from.req_const_symmetry())
        .perform(ctrl_to.req_symmetry());

  // Enwrap inside TensorImpl and return
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, std::move(newtensor_ptr));
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::nosym_like() const {
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes);
}

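// Set all elements selected by the index mask to the given value. Repeated
// characters in the mask address a generalised diagonal: e.g. a mask "ii"
// presumably touches only the diagonal elements of a matrix, while a mask of
// all-distinct characters ("ij") sets every element via btod_set.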
template <size_t N>
void TensorImpl<N>::set_mask(std::string mask, scalar_type value) {
  if (N != mask.size()) {
    throw invalid_argument("The number of characters in the index mask (== " + mask +
                           ") does not agree with the Tensor dimensionality (== " +
                           std::to_string(N) + ")");
  }

  // Non-obviously the indices for the mask have to start with 1,
  // 0 gives utterly weird values
  size_t next_idx = 1;
  std::map<char, size_t> char_to_idx;
  lt::sequence<N, size_t> seq(0);
  for (size_t i = 0; i < mask.size(); ++i) {
    const char c  = mask[i];
    const auto it = char_to_idx.find(c);
    if (it == char_to_idx.end()) {
      char_to_idx[c] = next_idx;
      seq[i]         = next_idx;
      next_idx += 1;
    } else {
      seq[i] = it->second;
    }
  }

  if (char_to_idx.size() == N) {
    // Every character in the mask is different ... just use bto_set
    // TODO Optimise: This evaluates, but here there is no point ... Just allocate
    lt::btod_set<N>(value).perform(*libtensor_ptr());
  } else {
    lt::btod_set_diag<N>(seq, value).perform(*libtensor_ptr());
  }
}

template <size_t N>
void TensorImpl<N>::set_random() {
  // TODO optimise: No point in evaluating ... just allocate
  lt::btod_random<N>().perform(*libtensor_ptr());
}

namespace {

template <size_t R, size_t D, size_t N>
std::shared_ptr<Tensor> execute_diagonal(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_diag,
      const std::vector<std::shared_ptr<const lt::letter>>& label_expr,
      std::shared_ptr<ExpressionTree> expr, std::vector<AxisInfo> axes) {
  auto lthis    = expr->attach_letters<N>(label_expr);
  auto res      = lt::expr::diag(*label_diag[0], strip_safe<D>(label_diag), lthis);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result), expr->keepalives);
  return std::make_shared<TensorImpl<R>>(adcmem_ptr, axes, expr_ptr);
}

}  // namespace

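// Extract the generalised diagonal over the given axes. The selected axes are
// fused into one axis, which becomes the last axis of the result; for a
// matrix, diagonal({0, 1}) thus yields the vector of diagonal elements.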
template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::diagonal(std::vector<size_t> axes) {
  if (axes.size() <= 1) {
    throw invalid_argument("Axes needs to have at least two entries.");
  }
  auto label                                = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();

  std::vector<std::shared_ptr<const lt::letter>> diag;
  std::unique_ptr<AxisInfo> diagaxis_ptr;
  std::vector<size_t> used_indices;
  for (size_t& i : axes) {
    auto it = std::find(used_indices.begin(), used_indices.end(), i);
    if (it != used_indices.end()) {
      throw invalid_argument("Axes may not have repeated indices.");
    }
    if (i >= N) {
      throw invalid_argument("Axis index (== " + std::to_string(i) +
                             ") goes beyond dimensionality of tensor (" +
                             std::to_string(N) + ")");
    }
    if (diagaxis_ptr == nullptr) {
      diagaxis_ptr.reset(new AxisInfo(m_axes[i]));
    } else {
      if (*diagaxis_ptr != m_axes[i]) {
        throw invalid_argument("Cannot form diagonal over differing axes. " +
                               diagaxis_ptr->label + " versus " + m_axes[i].label + ".");
      }
    }

    diag.push_back(label[i]);
    used_indices.push_back(i);
  }

  // Collect letters, which are to be left unchanged.
  std::vector<AxisInfo> axes_result;
  std::vector<std::shared_ptr<const lt::letter>> label_result;
  for (size_t i = 0; i < N; ++i) {
    auto it = std::find(used_indices.begin(), used_indices.end(), i);
    if (it == used_indices.end()) {
      label_result.push_back(label[i]);
      axes_result.push_back(m_axes[i]);
    }
  }
  label_result.push_back(diag[0]);
  axes_result.push_back(*diagaxis_ptr);

#define IF_MATCHES_EXECUTE(NTHIS, DIAG)                                            \
  if (N == NTHIS && DIAG == diag.size()) {                                         \
    constexpr size_t DIMOUT = NTHIS - DIAG + 1;                                    \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                    \
                  "Internal error with DIMOUT computation");                       \
    return execute_diagonal<DIMOUT, DIAG, NTHIS>(m_adcmem_ptr, label_result, diag, \
                                                 label, expr_this, axes_result);   \
  }

  IF_MATCHES_EXECUTE(2, 2)  //
  IF_MATCHES_EXECUTE(3, 2)  //
  IF_MATCHES_EXECUTE(3, 3)  //
  IF_MATCHES_EXECUTE(4, 2)  //
  IF_MATCHES_EXECUTE(4, 3)  //
  IF_MATCHES_EXECUTE(4, 4)  //

  throw not_implemented_error("diagonal not implemented for dimensionality " +
                              std::to_string(N) + " and " + std::to_string(diag.size()) +
                              " axes indices.");

#undef IF_MATCHES_EXECUTE
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::scale(scalar_type c) const {
  // Collect labelled expressions
  std::vector<std::shared_ptr<const lt::letter>> label = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this            = expression_ptr();
  auto lthis = expr_this->attach_letters<N>(label);

  // Execute the operation
  auto scaled = c * lthis;

  auto expr_ptr = std::make_shared<ExpressionTree>(
        scaled.get_expr(), extract_expr_permutation(scaled, label),
        expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::add(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto sum = lthis + lother;

  auto expr_ptr = std::make_shared<ExpressionTree>(
        sum.get_expr(), extract_expr_permutation(sum, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

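// In-place accumulation: adds the linear combination
// sum_i scalars[i] * tensors[i] onto this tensor via btod_add, bypassing the
// lazy expression layer.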
template <size_t N>
void TensorImpl<N>::add_linear_combination(
      std::vector<scalar_type> scalars,
      std::vector<std::shared_ptr<Tensor>> tensors) const {
  if (scalars.size() != tensors.size()) {
    throw dimension_mismatch(
          "std::vector of scalars has size " + std::to_string(scalars.size()) +
          ", but passed vector of tensors has size " + std::to_string(tensors.size()));
  }
  if (scalars.size() == 0) return;

  std::unique_ptr<lt::btod_add<N>> operator_ptr;
  for (size_t i = 0; i < scalars.size(); ++i) {
    DIMENSIONALITY_CHECK(tensors[i]);
    if (!operator_ptr) {
      operator_ptr.reset(new lt::btod_add<N>(as_btensor<N>(tensors[i]), scalars[i]));
    } else {
      operator_ptr->add_op(as_btensor<N>(tensors[i]), scalars[i]);
    }
  }
  operator_ptr->perform(*libtensor_ptr(), 1.0);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::multiply(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto mult = lt::expr::mult(lthis, lother);

  auto expr_ptr = std::make_shared<ExpressionTree>(
        mult.get_expr(), extract_expr_permutation(mult, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::divide(std::shared_ptr<Tensor> other) const {
  DIMENSIONALITY_CHECK(other);

  // Collect labelled expressions
  auto label                                 = make_label(N);
  std::shared_ptr<ExpressionTree> expr_this  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_other = as_expression(other);
  auto lthis                                 = expr_this->attach_letters<N>(label);
  auto lother                                = expr_other->attach_letters<N>(label);

  // Execute the operation
  auto div = lt::expr::div(lthis, lother);

  auto expr_ptr = std::make_shared<ExpressionTree>(
        div.get_expr(), extract_expr_permutation(div, label),
        merge(expr_this->keepalives, expr_other->keepalives));
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
}

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::copy() const {
  if (needs_evaluation()) {
    // Return deep copy to the expression
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_expr_ptr);
  } else {
    // Actually make a deep copy of the tensor
    auto ret_ptr = empty_like();
    auto lt_ptr  = std::static_pointer_cast<TensorImpl<N>>(ret_ptr)->libtensor_ptr();
    lt::btod_copy<N>(*libtensor_ptr()).perform(*lt_ptr);
    return ret_ptr;
  }
}

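// Lazy transpose: validates the permutation, reorders the cached axes and
// chains the permutation into the expression tree; no tensor data is moved
// until the expression is evaluated.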
template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::transpose(std::vector<size_t> permutation) const {
  if (permutation.size() != N) {
    throw invalid_argument(
          "Number of indices in provided transposition axes (== " +
          std::to_string(permutation.size()) +
          ") does not agree with tensor dimension (== " + std::to_string(N) + ").");
  }

  // Reorder the axes
  std::vector<AxisInfo> newaxes;
  for (size_t i = 0; i < N; ++i) {
    for (size_t j = 0; j < i; ++j) {
      if (permutation[i] == permutation[j]) {
        throw invalid_argument("Duplicate index in transposition axes (" +
                               std::to_string(permutation[i]) + ") at indices " +
                               std::to_string(i) + " and " + std::to_string(j) + ".");
      }
    }
    if (permutation[i] >= N) {
      throw invalid_argument("Invalid axes specifier " + std::to_string(permutation[i]) +
                             ". Exceeds tensor dimension -1 (==" + std::to_string(N - 1) +
                             ").");
    }

    newaxes.push_back(m_axes[permutation[i]]);
  }

  // Chain permutations
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  std::vector<size_t> result_permutation;
  for (size_t i = 0; i < N; ++i) {
    result_permutation.push_back(expr_this->permutation[permutation[i]]);
  }
  auto expr_ptr = std::make_shared<ExpressionTree>(
        *expr_this->tree_ptr, result_permutation, expr_this->keepalives);
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, newaxes, expr_ptr);
}

namespace {
template <size_t R, size_t N, size_t M>
std::shared_ptr<Tensor> execute_direct_sum(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (dirsum)
  auto res      = lt::dirsum(lfirst, lsecond);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  return std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
}
}  // namespace

template <size_t N>
std::shared_ptr<Tensor> TensorImpl<N>::direct_sum(std::shared_ptr<Tensor> other) const {
  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label_first  = make_label(N);
  lalvec_t label_second = make_label(other->ndim());

  lalvec_t label_result;
  for (auto& v : label_first) label_result.push_back(v);
  for (auto& v : label_second) label_result.push_back(v);
  std::vector<AxisInfo> axes_result;
  for (auto& ax : m_axes) axes_result.push_back(ax);
  for (auto& ax : other->axes()) axes_result.push_back(ax);

  std::shared_ptr<ExpressionTree> expr_first  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_second = as_expression(other);
#define IF_MATCHES_EXECUTE(DIMA, DIMB)                                                   \
  if (DIMA == label_first.size() && DIMB == label_second.size()) {                       \
    constexpr size_t DIMOUT = DIMA + DIMB;                                               \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                          \
                  "Internal error with DIMOUT computation");                             \
    if (DIMOUT != label_result.size()) {                                                 \
      throw runtime_error(                                                               \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()");    \
    }                                                                                    \
    return execute_direct_sum<DIMOUT, DIMA, DIMB>(m_adcmem_ptr, label_result,            \
                                                  label_first, label_second, expr_first, \
                                                  expr_second, axes_result);             \
  }

  IF_MATCHES_EXECUTE(1, 1)  //
  IF_MATCHES_EXECUTE(1, 2)  //
  IF_MATCHES_EXECUTE(1, 3)  //
  IF_MATCHES_EXECUTE(2, 1)  //
  IF_MATCHES_EXECUTE(2, 2)  //
  IF_MATCHES_EXECUTE(3, 1)  //

  throw not_implemented_error(
        "Did not implement the case of a direct_sum of two tensors of dimension " +
        std::to_string(ndim()) + " and " + std::to_string(other->ndim()) + ".");
#undef IF_MATCHES_EXECUTE
}

template <>
double TensorImpl<1>::trace(std::string) const {
  throw runtime_error("Trace can only be applied to tensors of even rank.");
}

template <>
double TensorImpl<3>::trace(std::string) const {
  throw runtime_error("Trace can only be applied to tensors of even rank.");
}

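// Full trace over pairwise-matched indices: equal characters in the
// contraction string mark the axis pairs to trace over, e.g. "ijij" on a
// rank-4 tensor contracts axis 0 with axis 2 and axis 1 with axis 3.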
template <size_t N>
double TensorImpl<N>::trace(std::string contraction) const {
  if (contraction.size() != N) {
    throw invalid_argument(
          "Number of passed contraction indices needs to match tensor dimensionality.");
  }

  std::vector<std::pair<size_t, size_t>> trace_pairs;
  std::vector<bool> index_done(N, false);
  for (size_t i = 0; i < N; ++i) {
    if (index_done[i]) continue;
    index_done[i]   = true;
    bool found_pair = false;
    for (size_t j = i + 1; j < N; ++j) {
      if (contraction[i] == contraction[j]) {
        if (m_axes[i] != m_axes[j]) {
          throw invalid_argument("Axes to be traced along do not agree: " +
                                 m_axes[i].label + " versus " + m_axes[j].label);
        }
        index_done[j] = true;
        trace_pairs.push_back({i, j});
        found_pair = true;
        break;
      }
    }
    if (!found_pair) {
      throw invalid_argument("Found no matching second index for '" +
                             std::string(1, contraction[i]) + "'.");
    }
  }

  if (2 * trace_pairs.size() != N) {
    throw invalid_argument(
          "Expected to find half as many trace indices as there are tensor dimensions, "
          "i.e. " +
          std::to_string(N / 2) + " indices and not " +
          std::to_string(trace_pairs.size()) + ".");
  }

  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label = make_label(N);
  lalvec_t tlal_first;
  lalvec_t tlal_second;
  for (const auto& p : trace_pairs) {
    tlal_first.push_back(label[p.first]);
    tlal_second.push_back(label[p.second]);
  }

  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
  auto lfirst                               = expr_this->attach_letters<N>(label);
  constexpr size_t K                        = N / 2;
  return lt::trace(strip_safe<K>(tlal_first), strip_safe<K>(tlal_second), lfirst);
}

namespace {
template <size_t R, size_t K, size_t N, size_t M>
TensorOrScalar execute_tensordot_contract(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_contracted,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  // Build labelled expressions:
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (contract)
  auto res      = lt::contract(strip_safe<K>(label_contracted), lfirst, lsecond);
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  auto tensor_ptr = std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
  return TensorOrScalar{tensor_ptr, 0.0};
}

template <size_t R, size_t N, size_t M>
TensorOrScalar execute_tensordot_tensorprod(
      std::shared_ptr<const AdcMemory> adcmem_ptr,
      const std::vector<std::shared_ptr<const lt::letter>>& label_result,
      const std::vector<std::shared_ptr<const lt::letter>>& label_first,
      const std::vector<std::shared_ptr<const lt::letter>>& label_second,
      std::shared_ptr<ExpressionTree> expr_first,
      std::shared_ptr<ExpressionTree> expr_second, std::vector<AxisInfo> axes_result) {
  auto lfirst  = expr_first->attach_letters<N>(label_first);
  auto lsecond = expr_second->attach_letters<M>(label_second);

  // Execute the operation (tensor product)
  auto res      = lfirst * lsecond;
  auto expr_ptr = std::make_shared<ExpressionTree>(
        res.get_expr(), extract_expr_permutation(res, label_result),
        merge(expr_first->keepalives, expr_second->keepalives));
  auto tensor_ptr = std::make_shared<TensorImpl<R>>(adcmem_ptr, axes_result, expr_ptr);
  return TensorOrScalar{tensor_ptr, 0.0};
}

}  // namespace

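// Contract this tensor with another over the given pairs of axes. A full
// contraction returns the scalar branch of TensorOrScalar (via dot_product),
// an empty axes list returns the outer product, and everything in between a
// lazily contracted tensor; e.g. contracting axis 1 of a matrix with axis 0
// of another presumably reproduces the ordinary matrix product.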
template <size_t N>
TensorOrScalar TensorImpl<N>::tensordot(
      std::shared_ptr<Tensor> other,
      std::pair<std::vector<size_t>, std::vector<size_t>> axes) const {
  const std::vector<size_t>& axes_first  = axes.first;
  const std::vector<size_t>& axes_second = axes.second;

  if (axes_first.size() != axes_second.size()) {
    throw invalid_argument(
          "Length of the passed axes does not agree (first == " +
          shape_to_string(axes_first) + " and second == " + shape_to_string(axes_second) +
          ")");
  }
  if (axes_first.size() > N) {
    throw invalid_argument(
          "Length of the passed axes overshoots dimensionality of the first "
          "tensor.");
  }
  if (axes_first.size() > other->ndim()) {
    throw invalid_argument(
          "Length of the passed axes overshoots dimensionality of the second "
          "tensor.");
  }

  // Build label for first and second tensor and contraction indices
  typedef std::vector<std::shared_ptr<const lt::letter>> lalvec_t;
  lalvec_t label_first  = make_label(N);
  lalvec_t label_second = make_label(other->ndim());
  lalvec_t label_contracted;
  for (size_t i = 0; i < axes_first.size(); ++i) {
    std::shared_ptr<const lt::letter> l_contraction = label_first[axes_first[i]];
    label_second[axes_second[i]]                    = l_contraction;

    if (m_axes[axes_first[i]] != other->axes()[axes_second[i]]) {
      throw invalid_argument(
            "tensordot can only contract equivalent axes together. The " +
            std::to_string(i) + "-th axis clashes (" + m_axes[axes_first[i]].label +
            " versus " + other->axes()[axes_second[i]].label + "). Tensor spaces are " +
            space() + " and " + other->space());
    }
    label_contracted.push_back(l_contraction);
  }

  // Build labels of the result
  lalvec_t label_result;
  std::vector<AxisInfo> axes_result;
  for (size_t i = 0; i < N; ++i) {
    auto it = std::find(axes_first.begin(), axes_first.end(), i);
    if (it == axes_first.end()) {
      label_result.push_back(label_first[i]);
      axes_result.push_back(m_axes[i]);
    }
  }
  for (size_t j = 0; j < other->ndim(); ++j) {
    auto it = std::find(axes_second.begin(), axes_second.end(), j);
    if (it == axes_second.end()) {
      label_result.push_back(label_second[j]);
      axes_result.push_back(other->axes()[j]);
    }
  }

  if (label_result.size() != N + other->ndim() - 2 * label_contracted.size()) {
    throw runtime_error(
          "Internal error: Result index count does not agree with expected "
          "number.");
  }

  // Build labelled expressions:
  std::shared_ptr<ExpressionTree> expr_first  = expression_ptr();
  std::shared_ptr<ExpressionTree> expr_second = as_expression(other);

  // Branch into the different cases
  if (label_result.size() == 0 && N == label_contracted.size() &&
      N == label_first.size() && N == label_second.size()) {
    // Full contraction => execute dot_product

    auto lfirst  = expr_first->attach_letters<N>(label_first);
    auto lsecond = expr_second->attach_letters<N>(label_second);
    return TensorOrScalar{nullptr, lt::dot_product(lfirst, lsecond)};
  } else if (label_contracted.size() == 0) {
#define IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(DIMA, DIMB)                            \
  if (DIMA == label_first.size() && DIMB == label_second.size()) {                    \
    constexpr size_t DIMOUT = DIMB + DIMA;                                            \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                       \
                  "Internal error with DIMOUT computation");                          \
    if (DIMOUT != label_result.size()) {                                              \
      throw runtime_error(                                                            \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()"); \
    }                                                                                 \
    return execute_tensordot_tensorprod<DIMOUT, DIMA, DIMB>(                          \
          m_adcmem_ptr, label_result, label_first, label_second, expr_first,          \
          expr_second, axes_result);                                                  \
  }

    //
    // Instantiation generated from TensorImpl/instantiate_valid.py
    //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(1, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(2, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD(3, 1)  //

#undef IF_DIMENSIONS_MATCH_EXECUTE_TENSORPROD
  } else {
    // Other cases => normal contraction
#define IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(N_CONTR_IDCS, DIMA, DIMB)                \
  if (DIMA == label_first.size() && DIMB == label_second.size() &&                    \
      N_CONTR_IDCS == label_contracted.size()) {                                      \
    constexpr size_t DIMOUT = DIMB + DIMA - N_CONTR_IDCS - N_CONTR_IDCS;              \
    static_assert((DIMOUT > 0 && DIMOUT < 100),                                       \
                  "Internal error with DIMOUT computation");                          \
    if (DIMOUT != label_result.size()) {                                              \
      throw runtime_error(                                                            \
            "Internal error: Inconsistency with DIMOUT and label_contracted.size()"); \
    }                                                                                 \
    return execute_tensordot_contract<DIMOUT, N_CONTR_IDCS, DIMA, DIMB>(              \
          m_adcmem_ptr, label_result, label_contracted, label_first, label_second,    \
          expr_first, expr_second, axes_result);                                      \
  }

    //
    // Instantiation generated from TensorImpl/instantiate_valid.py
    //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 1, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 2, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 3, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 1)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(1, 4, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 2, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 3, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 2)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(2, 4, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 3, 4)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 3)  //
    IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT(3, 4, 4)  //

#undef IF_DIMENSIONS_MATCH_EXECUTE_CONTRACT
  }

  throw not_implemented_error(
        "Did not implement the case of a tensordot over " +
        std::to_string(label_contracted.size()) +
        " indices for two tensors of dimensions " + std::to_string(label_first.size()) +
        " and " + std::to_string(label_second.size()) +
        ", yielding a tensor of dimension " + std::to_string(label_result.size()) + ".");
}

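// Batched scalar product: returns <this, tensors[i]> for every passed tensor,
// each of which has to match this tensor in dimensionality and shape.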
template <size_t N>
std::vector<scalar_type> TensorImpl<N>::dot(
      std::vector<std::shared_ptr<Tensor>> tensors) const {
  std::vector<scalar_type> ret(tensors.size(), 0.0);

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto tensor_ptr = std::static_pointer_cast<TensorImpl<N>>(tensors[i]);
    if (ndim() != tensor_ptr->ndim()) {
      throw dimension_mismatch(
            "Dimensionality of this tensor (" + std::to_string(ndim()) +
            ") does not agree with the dimensionality of the " + std::to_string(i) +
            "-th tensor passed, which has dimensionality " +
            std::to_string(tensor_ptr->ndim()) + ".");
    }
    if (shape() != tensor_ptr->shape()) {
      throw dimension_mismatch("Shape of this tensor (" + shape_to_string(shape()) +
                               ") does not agree with the shape of the " +
                               std::to_string(i) + "-th tensor passed, which has shape " +
                               shape_to_string(tensor_ptr->shape()) + ".");
    }

    ret[i] = lt::btod_dotprod<N>(*libtensor_ptr(), *(tensor_ptr->libtensor_ptr()))
                   .calculate();
  }
  return ret;
}

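// (Anti)symmetrisation over disjoint index pairs: for a single pair {i, j} the
// result is 0.5 * (T + P_ij T) for symmetrise and 0.5 * (T - P_ij T) for
// antisymmetrise, where P_ij exchanges the two indices.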
1021
template <size_t N>
1022
std::shared_ptr<Tensor> TensorImpl<N>::symmetrise(
27,958✔
1023
      const std::vector<std::vector<size_t>>& permutations) const {
1024
  if (permutations.size() == 0) {
27,958✔
1025
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_libtensor_ptr,
×
1026
                                           m_expr_ptr);  // Noop
×
1027
  }
1028

1029
  // Label this expression
1030
  auto label                                = make_label(N);
27,958✔
1031
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
27,958✔
1032
  auto lthis                                = expr_this->attach_letters<N>(label);
27,958✔
1033

1034
  // Execute the operation
1035
  auto symmetrised = [&permutations, &lthis, this, label]() {
124,564✔
1036
    if (permutations.size() == 1) {
27,958✔
1037
      auto parsed = parse_permutation<1>(m_axes, strip_safe<N>(label), permutations);
9,210✔
1038
      return 0.5 * lt::expr::symm(parsed.first, parsed.second, lthis);
15,350✔
1039
    } else if (permutations.size() == 2) {
18,748✔
1040
      auto parsed = parse_permutation<2>(m_axes, strip_safe<N>(label), permutations);
18,748✔
1041
      return 0.5 * lt::expr::symm(parsed.first, parsed.second, lthis);
31,305✔
1042
    } else {
1043
      throw runtime_error(
×
1044
            "Antisymmetrisation not implemented for more than two index pairs.");
1045
    }
1046
  }();
9,261✔
1047

1048
  auto expr_ptr = std::make_shared<ExpressionTree>(
27,958✔
1049
        symmetrised.get_expr(), extract_expr_permutation(symmetrised, label),
9,261✔
1050
        expr_this->keepalives);
27,958✔
1051
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
27,958✔
1052
}
27,958✔
1053

1054
template <size_t N>
1055
std::shared_ptr<Tensor> TensorImpl<N>::antisymmetrise(
159,551✔
1056
      const std::vector<std::vector<size_t>>& permutations) const {
1057
  if (permutations.size() == 0) {
159,551✔
1058
    return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, m_libtensor_ptr,
×
1059
                                           m_expr_ptr);  // Noop
×
1060
  }
1061

1062
  // Label this expression
1063
  auto label                                = make_label(N);
159,551✔
1064
  std::shared_ptr<ExpressionTree> expr_this = expression_ptr();
159,551✔
1065
  auto lthis                                = expr_this->attach_letters<N>(label);
159,551✔
1066

1067
  // Execute the operation
1068
  auto antisymmetrised = [&permutations, &lthis, this, &label]() {
639,123✔
1069
    if (permutations.size() == 1) {
159,551✔
1070

1071
      auto parsed = parse_permutation<1>(m_axes, strip_safe<N>(label), permutations);
159,548✔
1072
      return 0.5 * lt::expr::asymm(parsed.first, parsed.second, lthis);
266,219✔
1073
    } else if (permutations.size() == 2) {
3✔
1074
      auto parsed = parse_permutation<2>(m_axes, strip_safe<N>(label), permutations);
3✔
1075
      return 0.5 * lt::expr::asymm(parsed.first, parsed.second, lthis);
5✔
1076
    } else {
1077
      throw runtime_error(
×
1078
            "Antisymmetrisation not implemented for more than two index pairs.");
1079
    }
1080
  }();
52,878✔
1081

1082
  auto expr_ptr = std::make_shared<ExpressionTree>(
159,551✔
1083
        antisymmetrised.get_expr(), extract_expr_permutation(antisymmetrised, label),
52,878✔
1084
        expr_this->keepalives);
159,551✔
1085
  return std::make_shared<TensorImpl<N>>(m_adcmem_ptr, m_axes, expr_ptr);
159,551✔
1086
}
159,551✔
1087

1088
template <size_t N>
1089
void TensorImpl<N>::set_element(const std::vector<size_t>& tidx, scalar_type value) {
34,704✔
1090
  if (!is_element_allowed(tidx)) {
34,704✔
1091
    throw runtime_error("Setting tensor index (" + shape_to_string(tidx) +
×
1092
                        ") not allowed, since zero by symmetry.");
1093
  }
1094

1095
  lt::index<N> block_idx;
34,704✔
1096
  lt::index<N> in_block_idx;
34,704✔
1097
  std::tie(block_idx, in_block_idx) = assert_convert_tensor_index(*libtensor_ptr(), tidx);
34,704✔
1098
  lt::btod_set_elem<N>{}.perform(*libtensor_ptr(), block_idx, in_block_idx, value);
34,704✔
1099
}
34,704✔

template <size_t N>
scalar_type TensorImpl<N>::get_element(const std::vector<size_t>& tidx) const {
  lt::index<N> block_idx;
  lt::index<N> in_block_idx;
  std::tie(block_idx, in_block_idx) = assert_convert_tensor_index(*libtensor_ptr(), tidx);

  // The value we want to return
  scalar_type ret;

  // Make an orbit over the block index, i.e. find the canonical block
  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  lt::dimensions<N> bidims(libtensor_ptr()->get_bis().get_block_index_dims());
  lt::orbit<N, scalar_type> obit(ctrl.req_const_symmetry(), block_idx);

  // If the orbit (i.e. the index) is not allowed by symmetry, then the element
  // is zero.
  if (!obit.is_allowed()) return 0;

  // Absolute index of the canonical block
  const lt::abs_index<N> idx_abscanon(obit.get_acindex(), bidims);

  // Check if we have hit a zero block, then return 0 as well.
  if (ctrl.req_is_zero_block(idx_abscanon.get_index())) return 0;

  // The transformation between the selected block and the canonical block
  const lt::tensor_transf<N, scalar_type>& tr = obit.get_transf(block_idx);

  // Transform in-block index.
  lt::index<N> pibidx(in_block_idx);  //< permuted in-block index
  pibidx.permute(tr.get_perm());

  // Get the actual value from the dense tensor representing the block
  {
    auto& block = ctrl.req_const_block(idx_abscanon.get_index());
    lt::dense_tensor_rd_ctrl<N, scalar_type> blkctrl(block);

    const scalar_type* p = blkctrl.req_const_dataptr();
    ret                  = p[lt::abs_index<N>(pibidx, block.get_dims()).get_abs_index()];
    blkctrl.ret_const_dataptr(p);
  }

  // Transform value
  tr.get_scalar_tr().apply(ret);

  ctrl.ret_const_block(idx_abscanon.get_index());
  return ret;
}
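
// Worked example (hedged, for intuition): suppose the symmetry stores only the
// canonical blocks of an antisymmetric 2-index tensor. Reading an element of a
// non-canonical block then resolves the orbit to the canonical block, permutes
// the in-block index accordingly and applies the scalar transformation (a sign
// flip here), so effectively get_element({j, i}) == -get_element({i, j}) even
// though only one of the two blocks is held in memory.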

template <size_t N>
bool TensorImpl<N>::is_element_allowed(const std::vector<size_t>& tidx) const {
  lt::index<N> block_idx;
  std::tie(block_idx, std::ignore) = assert_convert_tensor_index(*libtensor_ptr(), tidx);

  // Check if the block is allowed by the symmetry of the tensor
  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  lt::orbit<N, scalar_type> orb(ctrl.req_const_symmetry(), block_idx);
  const bool block_allowed = orb.is_allowed();

  // TODO Even if the block is allowed, the index might not be
  //      (e.g. diagonal of an anti-symmetric tensor).
  const bool index_allowed = true;

  return block_allowed && index_allowed;
}
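
// Note on the TODO above: because only the block-level check is implemented,
// an element that is zero by symmetry inside an allowed block (e.g. a diagonal
// element of an antisymmetric tensor) is still reported as allowed, and
// set_element will not reject it.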

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_absmax(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4absmax>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_absmin(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4absmin>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_max(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4max>(*libtensor_ptr(), n, unique_by_symmetry);
}

template <size_t N>
std::vector<std::pair<std::vector<size_t>, scalar_type>> TensorImpl<N>::select_n_min(
      size_t n, bool unique_by_symmetry) const {
  return execute_select_n<lt::compare4min>(*libtensor_ptr(), n, unique_by_symmetry);
}
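
// Usage sketch (hypothetical handle "t"): the select_n_* family returns
// (tensor index, value) pairs; with unique_by_symmetry == true only one
// representative per set of symmetry-equivalent elements is reported.
//   for (const auto& p : t.select_n_absmax(3, /*unique_by_symmetry=*/true)) {
//     // p.first is the full tensor index, p.second the element value
//   }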

template <size_t N>
void TensorImpl<N>::export_to(scalar_type* memptr, size_t size) const {
  if (this->size() != size) {
    throw invalid_argument("The memory size provided (== " + std::to_string(size) +
                           ") does not agree with the number of tensor elements (== " +
                           std::to_string(this->size()) + ")");
  }
  lt::btod_export<N>(*libtensor_ptr()).perform(memptr);
}
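
// Usage sketch (hedged): dump the tensor into a dense buffer laid out
// row-major over the full shape, matching the stride convention used by
// import_from below.
//   std::vector<scalar_type> buf(t.size());
//   t.export_to(buf.data(), buf.size());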

template <size_t N>
void TensorImpl<N>::import_from(const scalar_type* memptr, size_t size,
                                scalar_type tolerance, bool symmetry_check) {
  if (this->size() != size) {
    throw invalid_argument("The memory size provided (== " + std::to_string(size) +
                           ") does not agree with the number of tensor elements (== " +
                           std::to_string(this->size()) + ")");
  }

  if (symmetry_check) {
    // Slow algorithm with symmetry check (via libtensor)

    // Zero this memory.
    // TODO Check whether this really needs to be done for proper functioning.
    //      There is some indication that yes, unfortunately.
    lt::btod_set<N>(0.0).perform(*libtensor_ptr());

    scalar_type* noconst_mem = const_cast<scalar_type*>(memptr);
    libtensor::btod_import_raw<N>(noconst_mem, libtensor_ptr()->get_bis().get_dims(),
                                  tolerance)
          .perform(*libtensor_ptr());
  } else {
    // Fast algorithm without symmetry check (via import_from with generator)
    auto fast_importer = [this, memptr](
                               const std::vector<std::pair<size_t, size_t>>& range,
                               scalar_type* ptr) {
      if (range.size() != N) {
        throw runtime_error("Internal error: Dimension mismatch in fast_importer");
      }

      // Strides for accessing the full memptr
      std::array<size_t, N> strides_full;
      strides_full[N - 1] = 1;
      for (size_t idim = N - 1; idim > 0; --idim) {
        strides_full[idim - 1] = strides_full[idim] * shape()[idim];
      }

      // Dimensionalities for access into the small ptr
      std::array<size_t, N> dim_ptr;
      for (size_t idim = 0; idim != N; ++idim) {
        dim_ptr[idim] = range[idim].second - range[idim].first;
      }

      // Strides for accessing the small ptr
      std::array<size_t, N> strides_ptr;
      strides_ptr[N - 1] = 1;
      for (size_t idim = N - 1; idim > 0; --idim) {
        strides_ptr[idim - 1] = strides_ptr[idim] * dim_ptr[idim];
      }
      const size_t size_ptr = strides_ptr[0] * dim_ptr[0];

      for (size_t iabs = 0; iabs < size_ptr; ++iabs) {
        size_t full_abs = 0;
        for (size_t idim = 0; idim != N; ++idim) {
          size_t idx = range[idim].first + (iabs / strides_ptr[idim]) % dim_ptr[idim];
          full_abs += strides_full[idim] * idx;
        }
        ptr[iabs] = memptr[full_abs];
      }
    };
    import_from(fast_importer, tolerance, false);
  }
}
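
// Worked stride example (for the fast path above): for shape {4, 5} the
// full-buffer strides are {5, 1}, so element (i, j) sits at memptr[5*i + j].
// A block covering the range {(2, 4), (1, 4)} has dim_ptr = {2, 3} and
// strides_ptr = {3, 1}, so block-local flat index iabs maps back to
//   full_abs = 5 * (2 + iabs / 3) + (1 + iabs % 3).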

template <size_t N>
void TensorImpl<N>::import_from(
      std::function<void(const std::vector<std::pair<size_t, size_t>>&, scalar_type*)>
            generator,
      scalar_type tolerance, bool symmetry_check) {

  if (symmetry_check) {
    // Slow algorithm with symmetry check (via import_from with libtensor)

    // Get complete data via generator ...
    std::vector<std::pair<size_t, size_t>> full_range(N);
    for (size_t i = 0; i < N; ++i) {
      full_range[i].first  = 0;
      full_range[i].second = shape()[i];
    }
    std::vector<scalar_type> data(this->size());
    generator(full_range, data.data());

    // ... and import it
    import_from(data, tolerance, true);
  } else {
    // Fast algorithm by only considering canonical blocks
    // First zero out the tensor completely.
    lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
    ctrl.req_zero_all_blocks();

    // Now loop over all orbits, which gives access to all
    // canonical blocks
    lt::orbit_list<N, scalar_type> orbitlist(ctrl.req_const_symmetry());
    lt::index<N> blk_idx;  // Holder for canonical-block indices
    for (auto it = orbitlist.begin(); it != orbitlist.end(); ++it) {
      orbitlist.get_index(it, blk_idx);

      // Get the dense tensor for the current canonical block
      // and import the data from the generator
      lt::dense_tensor_wr_i<N, scalar_type>& blk = ctrl.req_block(blk_idx);

      // Compute range to import
      std::vector<std::pair<size_t, size_t>> range(N);
      const lt::block_index_space<N>& bis = libtensor_ptr()->get_bis();
      lt::index<N> blk_start(bis.get_block_start(blk_idx));
      lt::dimensions<N> blk_dims(bis.get_block_dims(blk_idx));
      for (size_t i = 0; i < N; ++i) {
        range[i].first  = blk_start[i];
        range[i].second = blk_start[i] + blk_dims[i];
      }

      bool all_zero = false;
      {
        lt::dense_tensor_wr_ctrl<N, scalar_type> cblk(blk);
        cblk.req_prefetch();
        scalar_type* ptr = cblk.req_dataptr();

        generator(range, ptr);  // Import data from the generator

        if (tolerance > 0) {
          // Check whether all elements fall below the tolerance,
          // i.e. whether this is effectively a zero block.
          all_zero = std::all_of(
                ptr, ptr + blk_dims.get_size(),
                [tolerance](scalar_type e) { return std::abs(e) < tolerance; });
        }

        cblk.ret_dataptr(ptr);
      }
      ctrl.ret_block(blk_idx);

      if (all_zero) {
        ctrl.req_zero_block(blk_idx);
      }
    }
  }
}
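
// Usage sketch (hypothetical generator): fill a tensor block-wise. In the fast
// path only canonical blocks are requested; symmetry-equivalent blocks remain
// implicit and blocks below the tolerance are marked as zero blocks.
//   t.import_from(
//         [](const std::vector<std::pair<size_t, size_t>>& range, scalar_type* ptr) {
//           // write the elements of the requested sub-range into ptr, row-major
//         },
//         /*tolerance=*/1e-14, /*symmetry_check=*/false);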

template <size_t N>
std::string TensorImpl<N>::describe_symmetry() const {
  std::stringstream ss;

  lt::block_tensor_ctrl<N, scalar_type> ctrl(*libtensor_ptr());
  ss << ctrl.req_const_symmetry();

  return ss.str();
}

template <size_t N>
std::string TensorImpl<N>::describe_expression(std::string stage) const {
  if (needs_evaluation()) {
    std::stringstream ss;
    if (stage == "unoptimised") {
      ss << *m_expr_ptr;
    } else if (stage == "optimised") {
      lt::expr::print_tree(m_expr_ptr->optimised_tree(), ss, 2);
    } else if (stage == "evaluation") {
      auto newtensor_ptr =
            std::make_shared<lt::btensor<N, scalar_type>>(as_bispace<N>(m_axes));
      lt::expr::expr_tree evaltree =
            m_expr_ptr->evaluation_tree(*newtensor_ptr, /* add = */ false);
      lt::expr::print_tree(evaltree, ss, 2);
    } else {
      throw invalid_argument("Stage " + stage +
                             " not valid for describe_expression. Try 'unoptimised', "
                             "'optimised' or 'evaluation'.");
    }
    return ss.str();
  } else {
    return "btensor of shape " + shape_to_string(m_shape);
  }
}
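
// Usage sketch (hypothetical handle "t"): inspect how a lazy tensor would be
// evaluated.
//   std::cout << t.describe_expression("optimised") << std::endl;
// Valid stages are "unoptimised", "optimised" and "evaluation"; a tensor that
// needs no evaluation is described as a plain btensor of its shape.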

template <size_t N>
std::shared_ptr<ExpressionTree> TensorImpl<N>::expression_ptr() const {
  if (m_expr_ptr != nullptr) {
    if (m_libtensor_ptr != nullptr) {
      throw runtime_error(
            "Internal error: m_libtensor_ptr is not a nullptr and neither is "
            "m_expr_ptr.");
    }
    return m_expr_ptr;  // Already have an expression for myself, just return it
  } else {
    if (m_libtensor_ptr == nullptr) {
      throw runtime_error(
            "Internal error: Both m_libtensor_ptr and m_expr_ptr are nullptrs.");
    }

    // Make a new expression tree with just this tensor in it
    return std::make_shared<ExpressionTree>(
          lt::expr::node_ident_any_tensor<N, scalar_type>(*m_libtensor_ptr),
          identity_permutation(N), std::vector<std::shared_ptr<void>>{m_libtensor_ptr});
  }
}

std::shared_ptr<ExpressionTree> as_expression(const std::shared_ptr<Tensor>& tensor) {
  std::shared_ptr<ExpressionTree> ret;
  if (tensor->ndim() == 1) {
    ret = std::static_pointer_cast<TensorImpl<1>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 2) {
    ret = std::static_pointer_cast<TensorImpl<2>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 3) {
    ret = std::static_pointer_cast<TensorImpl<3>>(tensor)->expression_ptr();
  } else if (tensor->ndim() == 4) {
    ret = std::static_pointer_cast<TensorImpl<4>>(tensor)->expression_ptr();
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 4.");
  }

  if (ret->permutation.size() != tensor->ndim()) {
    throw runtime_error("Internal error: Mismatch between permutation.size() == " +
                        std::to_string(ret->permutation.size()) +
                        " and tensor dimensionality " + std::to_string(tensor->ndim()) +
                        ".");
  }

  return ret;
}
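
// Design note: the explicit ndim dispatch above is the cost of hiding the
// compile-time rank N behind the runtime Tensor interface; the same 1..4
// switch recurs in as_btensor and make_tensor below, matching the explicitly
// instantiated template ranks at the end of this file.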

template <size_t N>
lt::btensor<N, scalar_type>& as_btensor(const std::shared_ptr<Tensor>& in) {
  if (N != in->ndim()) {
    throw dimension_mismatch("Requested dimensionality " + std::to_string(N) +
                             ", but passed tensor has dimensionality " +
                             std::to_string(in->ndim()));
  }
  return static_cast<lt::btensor<N, scalar_type>&>(
        *std::static_pointer_cast<TensorImpl<N>>(in));
}

template <size_t N>
std::shared_ptr<lt::btensor<N, scalar_type>> as_btensor_ptr(
      const std::shared_ptr<Tensor>& in) {
  if (N != in->ndim()) {
    throw dimension_mismatch("Requested dimensionality " + std::to_string(N) +
                             ", but passed tensor has dimensionality " +
                             std::to_string(in->ndim()));
  }
  return std::static_pointer_cast<TensorImpl<N>>(in)->libtensor_ptr();
}

namespace {
template <size_t N>
std::shared_ptr<Tensor> make_tensor_inner(std::shared_ptr<Symmetry> symmetry) {
  auto ltsym_ptr = as_lt_symmetry<N>(*symmetry);
  auto newtensor_ptr =
        std::make_shared<lt::btensor<N, scalar_type>>(ltsym_ptr->get_bis());

  lt::block_tensor_ctrl<N, scalar_type> ctrl_to(*newtensor_ptr);
  lt::so_copy<N, scalar_type>(*ltsym_ptr).perform(ctrl_to.req_symmetry());
  return std::make_shared<TensorImpl<N>>(symmetry->adcmem_ptr(), symmetry->axes(),
                                         std::move(newtensor_ptr));
}
}  // namespace

std::shared_ptr<Tensor> make_tensor(std::shared_ptr<Symmetry> symmetry) {
  if (symmetry->ndim() == 1) {
    return make_tensor_inner<1>(symmetry);
  } else if (symmetry->ndim() == 2) {
    return make_tensor_inner<2>(symmetry);
  } else if (symmetry->ndim() == 3) {
    return make_tensor_inner<3>(symmetry);
  } else if (symmetry->ndim() == 4) {
    return make_tensor_inner<4>(symmetry);
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 4.");
  }
}
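
// Usage sketch (hedged; "sym" is assumed to be built elsewhere): allocate a
// fresh tensor whose block structure and symmetry are copied from a Symmetry
// object.
//   std::shared_ptr<Symmetry> sym = ...;
//   std::shared_ptr<Tensor> t = make_tensor(sym);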

std::shared_ptr<Tensor> make_tensor(std::shared_ptr<const AdcMemory> adcmem_ptr,
                                    std::vector<AxisInfo> axes) {
  if (axes.size() == 1) {
    return std::make_shared<TensorImpl<1>>(adcmem_ptr, axes);
  } else if (axes.size() == 2) {
    return std::make_shared<TensorImpl<2>>(adcmem_ptr, axes);
  } else if (axes.size() == 3) {
    return std::make_shared<TensorImpl<3>>(adcmem_ptr, axes);
  } else if (axes.size() == 4) {
    return std::make_shared<TensorImpl<4>>(adcmem_ptr, axes);
  } else {
    throw not_implemented_error("Only implemented for dimensionality <= 4.");
  }
}

//
// Explicit instantiation
//

#define INSTANTIATE(DIM)                                                                 \
  template class TensorImpl<DIM>;                                                        \
                                                                                         \
  template lt::btensor<DIM, scalar_type>& as_btensor(const std::shared_ptr<Tensor>& in); \
                                                                                         \
  template std::shared_ptr<lt::btensor<DIM, scalar_type>> as_btensor_ptr(                \
        const std::shared_ptr<Tensor>& in);

INSTANTIATE(1)
INSTANTIATE(2)
INSTANTIATE(3)
INSTANTIATE(4)

#undef INSTANTIATE

}  // namespace libadcc