• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

IntelPython / dpnp / 16126000227

07 Jul 2025 07:24PM UTC coverage: 22.684% (-49.4%) from 72.051%
16126000227

Pull #2519

github

web-flow
Merge bd753a3a3 into 624f14f20
Pull Request #2519: tmp changes

889 of 9756 branches covered (9.11%)

Branch coverage included in aggregate %.

6317 of 22011 relevant lines covered (28.7%)

35.96 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

24.6
/dpnp/backend/extensions/lapack/heevd_batch.cpp
1
//*****************************************************************************
2
// Copyright (c) 2024-2025, Intel Corporation
3
// All rights reserved.
4
//
5
// Redistribution and use in source and binary forms, with or without
6
// modification, are permitted provided that the following conditions are met:
7
// - Redistributions of source code must retain the above copyright notice,
8
//   this list of conditions and the following disclaimer.
9
// - Redistributions in binary form must reproduce the above copyright notice,
10
//   this list of conditions and the following disclaimer in the documentation
11
//   and/or other materials provided with the distribution.
12
//
13
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
14
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
17
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23
// THE POSSIBILITY OF SUCH DAMAGE.
24
//*****************************************************************************
25

26
#include <algorithm>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <vector>

#include <pybind11/stl.h>

#include "common_helpers.hpp"
#include "evd_batch_common.hpp"
#include "heevd_batch.hpp"

// dpctl tensor headers
#include "utils/type_utils.hpp"
36

37
namespace dpnp::extensions::lapack
38
{
39
namespace mkl_lapack = oneapi::mkl::lapack;
40
namespace type_utils = dpctl::tensor::type_utils;
41

42
template <typename T, typename RealT>
43
static sycl::event heevd_batch_impl(sycl::queue &exec_q,
44
                                    const oneapi::mkl::job jobz,
45
                                    const oneapi::mkl::uplo upper_lower,
46
                                    const std::int64_t batch_size,
47
                                    const std::int64_t n,
48
                                    char *in_a,
49
                                    char *out_w,
50
                                    const std::vector<sycl::event> &depends)
51
{
×
52
    type_utils::validate_type_for_device<T>(exec_q);
×
53
    type_utils::validate_type_for_device<RealT>(exec_q);
×
54

55
    T *a = reinterpret_cast<T *>(in_a);
×
56
    RealT *w = reinterpret_cast<RealT *>(out_w);
×
57

58
    const std::int64_t a_size = n * n;
×
59
    const std::int64_t w_size = n;
×
60

61
    const std::int64_t lda = std::max<size_t>(1UL, n);
×
62

63
    // Get the number of independent linear streams
64
    const std::int64_t n_linear_streams =
×
65
        (batch_size > 16) ? 4 : ((batch_size > 4 ? 2 : 1));
×
66

67
    const std::int64_t scratchpad_size =
×
68
        mkl_lapack::heevd_scratchpad_size<T>(exec_q, jobz, upper_lower, n, lda);
×
69

70
    T *scratchpad = helper::alloc_scratchpad_batch<T>(scratchpad_size,
×
71
                                                      n_linear_streams, exec_q);
×
72

73
    // Computation events to manage dependencies for each linear stream
74
    std::vector<std::vector<sycl::event>> comp_evs(n_linear_streams, depends);
×
75

76
    std::stringstream error_msg;
×
77
    std::int64_t info = 0;
×
78

79
    // Release GIL to avoid serialization of host task
80
    // submissions to the same queue in OneMKL
81
    py::gil_scoped_release release;
×
82

83
    for (std::int64_t batch_id = 0; batch_id < batch_size; ++batch_id) {
×
84
        T *a_batch = a + batch_id * a_size;
×
85
        RealT *w_batch = w + batch_id * w_size;
×
86

87
        std::int64_t stream_id = (batch_id % n_linear_streams);
×
88

89
        T *current_scratch_heevd = scratchpad + stream_id * scratchpad_size;
×
90

91
        // Get the event dependencies for the current stream
92
        const auto &current_dep = comp_evs[stream_id];
×
93

94
        sycl::event heevd_event;
×
95
        try {
×
96
            heevd_event = mkl_lapack::heevd(
×
97
                exec_q,
×
98
                jobz, // 'jobz == job::vec' means eigenvalues and eigenvectors
×
99
                      // are computed.
100
                upper_lower, // 'upper_lower == job::upper' means the upper
×
101
                             // triangular part of A, or the lower triangular
102
                             // otherwise
103
                n,           // The order of the matrix A (0 <= n)
×
104
                a_batch,     // Pointer to the square A (n x n)
×
105
                             // If 'jobz == job::vec', then on exit it will
106
                             // contain the eigenvectors of A
107
                lda, // The leading dimension of A, must be at least max(1, n)
×
108
                w_batch, // Pointer to array of size at least n, it will contain
×
109
                         // the eigenvalues of A in ascending order
110
                current_scratch_heevd, // Pointer to scratchpad memory to be
×
111
                                       // used by MKL routine for storing
112
                                       // intermediate results
113
                scratchpad_size, current_dep);
×
114
        } catch (mkl_lapack::exception const &e) {
×
115
            error_msg << "Unexpected MKL exception caught during heevd() "
×
116
                         "call:\nreason: "
×
117
                      << e.what() << "\ninfo: " << e.info();
×
118
            info = e.info();
×
119
        } catch (sycl::exception const &e) {
×
120
            error_msg
×
121
                << "Unexpected SYCL exception caught during heevd() call:\n"
×
122
                << e.what();
×
123
            info = -1;
×
124
        }
×
125

126
        // Update the event dependencies for the current stream
127
        comp_evs[stream_id] = {heevd_event};
×
128
    }
×
129

130
    if (info != 0) // an unexpected error occurs
×
131
    {
×
132
        if (scratchpad != nullptr) {
×
133
            dpctl::tensor::alloc_utils::sycl_free_noexcept(scratchpad, exec_q);
×
134
        }
×
135
        throw std::runtime_error(error_msg.str());
×
136
    }
×
137

138
    sycl::event ht_ev = exec_q.submit([&](sycl::handler &cgh) {
×
139
        for (const auto &ev : comp_evs) {
×
140
            cgh.depends_on(ev);
×
141
        }
×
142
        auto ctx = exec_q.get_context();
×
143
        cgh.host_task([ctx, scratchpad]() {
×
144
            dpctl::tensor::alloc_utils::sycl_free_noexcept(scratchpad, ctx);
×
145
        });
×
146
    });
×
147

148
    return ht_ev;
×
149
}
×
150

151
template <typename fnT, typename T, typename RealT>
152
struct HeevdBatchContigFactory
153
{
154
    fnT get()
155
    {
392✔
156
        if constexpr (types::HeevdTypePairSupportFactory<T, RealT>::is_defined)
157
        {
4✔
158
            return heevd_batch_impl<T, RealT>;
4✔
159
        }
160
        else {
388✔
161
            return nullptr;
388✔
162
        }
388✔
163
    }
392✔
164
};
165

166
// Function-pointer type of the batched eigensolver implementations,
// shared with the common evd batch dispatch machinery
using evd::evd_batch_impl_fn_ptr_t;
167

168
// Registers the `_heevd_batch` Python entry point on module `m`.
// Builds the (value type, eigenvalue type) dispatch table once and binds a
// thin adapter that forwards calls into the common evd batch driver.
void init_heevd_batch(py::module_ m)
{
    using arrayT = dpctl::tensor::usm_ndarray;
    using event_vecT = std::vector<sycl::event>;

    // Populated once per process; maps type pairs onto heevd_batch_impl
    // instantiations (or nullptr for unsupported pairs)
    static evd_batch_impl_fn_ptr_t
        heevd_batch_dispatch_table[dpctl_td_ns::num_types]
                                  [dpctl_td_ns::num_types];

    evd::init_evd_dispatch_table<evd_batch_impl_fn_ptr_t,
                                 HeevdBatchContigFactory>(
        heevd_batch_dispatch_table);

    // Adapter forwarding the Python-level arguments plus the dispatch table
    // into the shared eigensolver batch entry point
    auto pyapi = [&](sycl::queue &exec_q, const std::int8_t jobz,
                     const std::int8_t upper_lower, const arrayT &eig_vecs,
                     const arrayT &eig_vals, const event_vecT &depends = {}) {
        return evd::evd_batch_func(exec_q, jobz, upper_lower, eig_vecs,
                                   eig_vals, depends,
                                   heevd_batch_dispatch_table);
    };

    m.def("_heevd_batch", pyapi,
          "Call `heevd` from OneMKL LAPACK library in a loop to return "
          "the eigenvalues and eigenvectors of a batch of complex Hermitian "
          "matrices",
          py::arg("sycl_queue"), py::arg("jobz"), py::arg("upper_lower"),
          py::arg("eig_vecs"), py::arg("eig_vals"),
          py::arg("depends") = py::list());
}
2✔
200
} // namespace dpnp::extensions::lapack
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc