]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/boost/numeric/ublas/tensor/functions.hpp
import new upstream nautilus stable release 14.2.8
[ceph.git] / ceph / src / boost / boost / numeric / ublas / tensor / functions.hpp
1 //
2 // Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
3 //
4 // Distributed under the Boost Software License, Version 1.0. (See
5 // accompanying file LICENSE_1_0.txt or copy at
6 // http://www.boost.org/LICENSE_1_0.txt)
7 //
8 // The authors gratefully acknowledge the support of
9 // Fraunhofer IOSB, Ettlingen, Germany
10 //
11
12
13 #ifndef BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
14 #define BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
15
16
17 #include <stdexcept>
18 #include <vector>
19 #include <algorithm>
20 #include <numeric>
21
22
23 #include "multiplication.hpp"
24 #include "algorithms.hpp"
25 #include "expression.hpp"
26 #include "expression_evaluation.hpp"
27 #include "storage_traits.hpp"
28
29 namespace boost {
30 namespace numeric {
31 namespace ublas {
32
33 template<class Value, class Format, class Allocator>
34 class tensor;
35
36 template<class Value, class Format, class Allocator>
37 class matrix;
38
39 template<class Value, class Allocator>
40 class vector;
41
42
43
44
45 /** @brief Computes the m-mode tensor-times-vector product
46 *
47 * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im]
48 *
49 * @note calls ublas::ttv
50 *
51 * @param[in] m contraction dimension with 1 <= m <= p
52 * @param[in] a tensor object A with order p
53 * @param[in] b vector object B
54 *
55 * @returns tensor object C with order p-1, the same storage format and allocator type as A
56 */
57 template<class V, class F, class A1, class A2>
58 auto prod(tensor<V,F,A1> const& a, vector<V,A2> const& b, const std::size_t m)
59 {
60
61 using tensor_type = tensor<V,F,A1>;
62 using extents_type = typename tensor_type::extents_type;
63 using ebase_type = typename extents_type::base_type;
64 using value_type = typename tensor_type::value_type;
65 using size_type = typename extents_type::value_type;
66
67 auto const p = std::size_t(a.rank());
68
69 if( m == 0)
70 throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero.");
71
72 if( p < m )
73 throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the modus.");
74
75 if( p == 0)
76 throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than zero.");
77
78 if( a.empty() )
79 throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty.");
80
81 if( b.size() == 0)
82 throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty.");
83
84
85 auto nc = ebase_type(std::max(p-1, size_type(2)) , size_type(1));
86 auto nb = ebase_type{b.size(),1};
87
88
89 for(auto i = 0u, j = 0u; i < p; ++i)
90 if(i != m-1)
91 nc[j++] = a.extents().at(i);
92
93 auto c = tensor_type(extents_type(nc),value_type{});
94
95 auto bb = &(b(0));
96
97 ttv(m, p,
98 c.data(), c.extents().data(), c.strides().data(),
99 a.data(), a.extents().data(), a.strides().data(),
100 bb, nb.data(), nb.data());
101
102
103 return c;
104 }
105
106
107
108 /** @brief Computes the m-mode tensor-times-matrix product
109 *
110 * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im]
111 *
112 * @note calls ublas::ttm
113 *
114 * @param[in] a tensor object A with order p
115 * @param[in] b vector object B
116 * @param[in] m contraction dimension with 1 <= m <= p
117 *
118 * @returns tensor object C with order p, the same storage format and allocator type as A
119 */
120 template<class V, class F, class A1, class A2>
121 auto prod(tensor<V,F,A1> const& a, matrix<V,F,A2> const& b, const std::size_t m)
122 {
123
124 using tensor_type = tensor<V,F,A1>;
125 using extents_type = typename tensor_type::extents_type;
126 using strides_type = typename tensor_type::strides_type;
127 using value_type = typename tensor_type::value_type;
128
129
130 auto const p = a.rank();
131
132 if( m == 0)
133 throw std::length_error("error in boost::numeric::ublas::prod(ttm): contraction mode must be greater than zero.");
134
135 if( p < m || m > a.extents().size())
136 throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater equal the modus.");
137
138 if( p == 0)
139 throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater than zero.");
140
141 if( a.empty() )
142 throw std::length_error("error in boost::numeric::ublas::prod(ttm): first argument tensor should not be empty.");
143
144 if( b.size1()*b.size2() == 0)
145 throw std::length_error("error in boost::numeric::ublas::prod(ttm): second argument matrix should not be empty.");
146
147
148 auto nc = a.extents().base();
149 auto nb = extents_type {b.size1(),b.size2()};
150 auto wb = strides_type (nb);
151
152 nc[m-1] = nb[0];
153
154 auto c = tensor_type(extents_type(nc),value_type{});
155
156 auto bb = &(b(0,0));
157
158 ttm(m, p,
159 c.data(), c.extents().data(), c.strides().data(),
160 a.data(), a.extents().data(), a.strides().data(),
161 bb, nb.data(), wb.data());
162
163
164 return c;
165 }
166
167
168
169
/** @brief Computes the q-mode tensor-times-tensor product
 *
 * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
 *
 * @note calls ublas::ttt
 *
 * na[phia[x]] = nb[phib[x]] for 1 <= x <= q
 *
 * @param[in] phia one-based permutation tuple of length q for the first input tensor a
 * @param[in] phib one-based permutation tuple of length q for the second input tensor b
 * @param[in] a left-hand side tensor with order r+q
 * @param[in] b right-hand side tensor with order s+q
 * @result tensor with order r+s
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
		  std::vector<std::size_t> const& phia, std::vector<std::size_t> const& phib)
{
	using tensor_type  = tensor<V,F,A1>;
	using extents_type = typename tensor_type::extents_type;
	using value_type   = typename tensor_type::value_type;
	using size_type    = typename extents_type::value_type;

	auto const pa = a.rank();
	auto const pb = b.rank();

	// q = number of contracted modes; both permutation tuples must have length q.
	auto const q  = size_type(phia.size());

	if(pa == 0ul)
		throw std::runtime_error("error in ublas::prod: order of left-hand side tensor must be greater than 0.");
	if(pb == 0ul)
		throw std::runtime_error("error in ublas::prod: order of right-hand side tensor must be greater than 0.");
	if(pa < q)
		throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the left-hand side tensor.");
	if(pb < q)
		throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the right-hand side tensor.");

	if(q != phib.size())
		throw std::runtime_error("error in ublas::prod: permutation tuples must have the same length.");

	if(pa < phia.size())
		throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order.");
	if(pb < phib.size())
		throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order.");

	auto const& na = a.extents();
	auto const& nb = b.extents();

	// Each contracted extent of A must match the paired contracted extent of B
	// (phia/phib are one-based, hence the -1).
	for(auto i = 0ul; i < q; ++i)
		if( na.at(phia.at(i)-1) != nb.at(phib.at(i)-1))
			throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct.");

	// r and s are the numbers of free (non-contracted) modes of A and B.
	auto const r = pa - q;
	auto const s = pb - q;

	// phia1/phib1 start as the identity permutations 1..pa and 1..pb.
	std::vector<std::size_t> phia1(pa), phib1(pb);
	std::iota(phia1.begin(), phia1.end(), 1ul);
	std::iota(phib1.begin(), phib1.end(), 1ul);

	// Result extents: free modes of A followed by free modes of B, padded
	// with 1s so the extents tuple has at least two entries.
	std::vector<std::size_t> nc( std::max ( r+s , size_type(2) ), size_type(1) );

	// Move each contracted mode of A to the back of phia1 while keeping the
	// free modes in their original relative order: std::remove shifts the
	// kept elements forward and returns the position where the removed value
	// is then re-written, so after the loop phia1 = [free modes..., phia...].
	for(auto i = 0ul; i < phia.size(); ++i)
		* std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i);

	//phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)),  phia1.end() )  ;

	assert(phia1.size() == pa);

	// First r entries of phia1 are now A's free modes (one-based).
	for(auto i = 0ul; i < r; ++i)
		nc[ i ] = na[ phia1[ i ] - 1 ];

	// Same remove-and-append reordering for B's permutation tuple.
	for(auto i = 0ul; i < phib.size(); ++i)
		* std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i) ;
	//phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ;

	assert(phib1.size() == pb);

	// First s entries of phib1 are B's free modes (one-based).
	for(auto i = 0ul; i < s; ++i)
		nc[ r + i ] = nb[ phib1[ i ] - 1 ];

	// std::copy( phib.begin(), phib.end(), phib1.end() );

	assert( phia1.size() == pa );
	assert( phib1.size() == pb );

	auto c = tensor_type(extents_type(nc), value_type{});

	ttt(pa, pb, q,
		phia1.data(), phib1.data(),
		c.data(), c.extents().data(), c.strides().data(),
		a.data(), a.extents().data(), a.strides().data(),
		b.data(), b.extents().data(), b.strides().data());

	return c;
}
268
269 //template<class V, class F, class A1, class A2, std::size_t N, std::size_t M>
270 //auto operator*( tensor_index<V,F,A1,N> const& lhs, tensor_index<V,F,A2,M> const& rhs)
271
272
273
274
275 /** @brief Computes the q-mode tensor-times-tensor product
276 *
277 * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
278 *
279 * @note calls ublas::ttt
280 *
281 * na[phi[x]] = nb[phi[x]] for 1 <= x <= q
282 *
283 * @param[in] phi one-based permutation tuple of length q for bot input tensors
284 * @param[in] a left-hand side tensor with order r+q
285 * @param[in] b right-hand side tensor with order s+q
286 * @result tensor with order r+s
287 */
288 template<class V, class F, class A1, class A2>
289 auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
290 std::vector<std::size_t> const& phi)
291 {
292 return prod(a, b, phi, phi);
293 }
294
295
296 /** @brief Computes the inner product of two tensors
297 *
298 * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,jp])
299 *
300 * @note calls inner function
301 *
302 * @param[in] a tensor object A
303 * @param[in] b tensor object B
304 *
305 * @returns a value type.
306 */
307 template<class V, class F, class A1, class A2>
308 auto inner_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
309 {
310 using value_type = typename tensor<V,F,A1>::value_type;
311
312 if( a.rank() != b.rank() )
313 throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both tensors must be the same.");
314
315 if( a.empty() || b.empty())
316 throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty.");
317
318 if( a.extents() != b.extents())
319 throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same.");
320
321 return inner(a.rank(), a.extents().data(),
322 a.data(), a.strides().data(),
323 b.data(), b.strides().data(), value_type{0});
324 }
325
326 /** @brief Computes the outer product of two tensors
327 *
328 * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq]
329 *
330 * @note calls outer function
331 *
332 * @param[in] a tensor object A
333 * @param[in] b tensor object B
334 *
335 * @returns tensor object C with the same storage format F and allocator type A1
336 */
337 template<class V, class F, class A1, class A2>
338 auto outer_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
339 {
340 using tensor_type = tensor<V,F,A1>;
341 using extents_type = typename tensor_type::extents_type;
342
343 if( a.empty() || b.empty() )
344 throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty.");
345
346 auto nc = typename extents_type::base_type(a.rank() + b.rank());
347 for(auto i = 0u; i < a.rank(); ++i)
348 nc.at(i) = a.extents().at(i);
349
350 for(auto i = 0u; i < b.rank(); ++i)
351 nc.at(a.rank()+i) = b.extents().at(i);
352
353 auto c = tensor_type(extents_type(nc));
354
355 outer(c.data(), c.rank(), c.extents().data(), c.strides().data(),
356 a.data(), a.rank(), a.extents().data(), a.strides().data(),
357 b.data(), b.rank(), b.extents().data(), b.strides().data());
358
359 return c;
360 }
361
362
363
364 /** @brief Transposes a tensor according to a permutation tuple
365 *
366 * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip]
367 *
368 * @note calls trans function
369 *
370 * @param[in] a tensor object of rank p
371 * @param[in] tau one-based permutation tuple of length p
372 * @returns a transposed tensor object with the same storage format F and allocator type A
373 */
374 template<class V, class F, class A>
375 auto trans(tensor<V,F,A> const& a, std::vector<std::size_t> const& tau)
376 {
377 using tensor_type = tensor<V,F,A>;
378 using extents_type = typename tensor_type::extents_type;
379 // using strides_type = typename tensor_type::strides_type;
380
381 if( a.empty() )
382 return tensor<V,F,A>{};
383
384 auto const p = a.rank();
385 auto const& na = a.extents();
386
387 auto nc = typename extents_type::base_type (p);
388 for(auto i = 0u; i < p; ++i)
389 nc.at(tau.at(i)-1) = na.at(i);
390
391 // auto wc = strides_type(extents_type(nc));
392
393 auto c = tensor_type(extents_type(nc));
394
395
396 trans( a.rank(), a.extents().data(), tau.data(),
397 c.data(), c.strides().data(),
398 a.data(), a.strides().data());
399
400 // auto wc_pi = typename strides_type::base_type (p);
401 // for(auto i = 0u; i < p; ++i)
402 // wc_pi.at(tau.at(i)-1) = c.strides().at(i);
403
404
405 //copy(a.rank(),
406 // a.extents().data(),
407 // c.data(), wc_pi.data(),
408 // a.data(), a.strides().data() );
409
410 return c;
411 }
412
413 /** @brief Computes the frobenius norm of a tensor expression
414 *
415 * @note evaluates the tensor expression and calls the accumulate function
416 *
417 *
418 * Implements the two-norm with
419 * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 )
420 *
421 * @param[in] a tensor object of rank p
422 * @returns the frobenius norm of the tensor
423 */
424 //template<class V, class F, class A>
425 //auto norm(tensor<V,F,A> const& a)
426 template<class T, class D>
427 auto norm(detail::tensor_expression<T,D> const& expr)
428 {
429
430 using tensor_type = typename detail::tensor_expression<T,D>::tensor_type;
431 using value_type = typename tensor_type::value_type;
432
433 auto a = tensor_type( expr );
434
435 if( a.empty() )
436 throw std::runtime_error("error in boost::numeric::ublas::norm: tensors should not be empty.");
437
438 return std::sqrt( accumulate( a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{},
439 [](auto const& l, auto const& r){ return l + r*r; } ) ) ;
440 }
441
442
443
444 /** @brief Extract the real component of tensor elements within a tensor expression
445 *
446 * @param[in] lhs tensor expression
447 * @returns unary tensor expression
448 */
449 template<class T, class D>
450 auto real(detail::tensor_expression<T,D> const& expr) {
451 return detail::make_unary_tensor_expression<T> (expr(), [] (auto const& l) { return std::real( l ); } );
452 }
453
454 /** @brief Extract the real component of tensor elements within a tensor expression
455 *
456 * @param[in] lhs tensor expression
457 * @returns unary tensor expression
458 */
459 template<class V, class F, class A, class D>
460 auto real(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
461 {
462 using tensor_complex_type = tensor<std::complex<V>,F,A>;
463 using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;
464
465 if( detail::retrieve_extents( expr ).empty() )
466 throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty.");
467
468 auto a = tensor_complex_type( expr );
469 auto c = tensor_type( a.extents() );
470
471 std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::real(l) ; } );
472
473 return c;
474 }
475
476
477 /** @brief Extract the imaginary component of tensor elements within a tensor expression
478 *
479 * @param[in] lhs tensor expression
480 * @returns unary tensor expression
481 */
482 template<class T, class D>
483 auto imag(detail::tensor_expression<T,D> const& lhs) {
484 return detail::make_unary_tensor_expression<T> (lhs(), [] (auto const& l) { return std::imag( l ); } );
485 }
486
487
488 /** @brief Extract the imag component of tensor elements within a tensor expression
489 *
490 * @param[in] lhs tensor expression
491 * @returns unary tensor expression
492 */
493 template<class V, class A, class F, class D>
494 auto imag(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
495 {
496 using tensor_complex_type = tensor<std::complex<V>,F,A>;
497 using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;
498
499 if( detail::retrieve_extents( expr ).empty() )
500 throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty.");
501
502 auto a = tensor_complex_type( expr );
503 auto c = tensor_type( a.extents() );
504
505 std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } );
506
507 return c;
508 }
509
510 /** @brief Computes the complex conjugate component of tensor elements within a tensor expression
511 *
512 * @param[in] expr tensor expression
513 * @returns complex tensor
514 */
515 template<class T, class D>
516 auto conj(detail::tensor_expression<T,D> const& expr)
517 {
518 using tensor_type = T;
519 using value_type = typename tensor_type::value_type;
520 using layout_type = typename tensor_type::layout_type;
521 using array_type = typename tensor_type::array_type;
522
523 using new_value_type = std::complex<value_type>;
524 using new_array_type = typename storage_traits<array_type>::template rebind<new_value_type>;
525
526 using tensor_complex_type = tensor<new_value_type,layout_type, new_array_type>;
527
528 if( detail::retrieve_extents( expr ).empty() )
529 throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty.");
530
531 auto a = tensor_type( expr );
532 auto c = tensor_complex_type( a.extents() );
533
534 std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } );
535
536 return c;
537 }
538
539
540 /** @brief Computes the complex conjugate component of tensor elements within a tensor expression
541 *
542 * @param[in] lhs tensor expression
543 * @returns unary tensor expression
544 */
545 template<class V, class A, class F, class D>
546 auto conj(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
547 {
548 return detail::make_unary_tensor_expression<tensor<std::complex<V>,F,A>> (expr(), [] (auto const& l) { return std::conj( l ); } );
549 }
550
551
552
553 }
554 }
555 }
556
557
558 #endif