peroxide/structure/vector.rs
1//! Extra tools for `Vec<f64>`
2//!
3//! ## Print `Vec<f64>`
4//!
5//! * There are two ways to print vector
6//! * Original way: `print!("{:?}", a);`
7//! * Peroxide way: `a.print();` - **Round-off to fourth digit**
8//!
9//! ```rust
10//! extern crate peroxide;
11//! use peroxide::fuga::*;
12//!
13//! fn main() {
14//! let a = vec![2f64.sqrt()];
15//! a.print(); // [1.4142]
16//! }
17//! ```
18//!
19//! ## Syntactic sugar for `Vec<f64>`
20//!
21//! * There is useful macro for `Vec<f64>`
22//! * For `R`, there is `c`
23//!
24//! ```R
25//! # R
26//! a = c(1,2,3,4)
27//! ```
28//!
29//! * For `Peroxide`, there is `c!`
30//!
31//! ```rust
32//! // Rust
33//! #[macro_use]
34//! extern crate peroxide;
35//! use peroxide::fuga::*;
36//!
37//! fn main() {
38//! let a = c!(1,2,3,4);
39//! }
40//! ```
41//!
42//! ## From ranges to `Vec<f64>`
43//!
44//! * For `R`, there is `seq` to declare sequence.
45//!
46//! ```R
47//! # R
48//! a = seq(1, 4, 1)
49//! print(a)
50//! # [1] 1 2 3 4
51//! ```
52//!
53//! * For `peroxide`, there is `seq` to declare sequence.
54//!
55//! ```rust
56//! extern crate peroxide;
57//! use peroxide::fuga::*;
58//!
59//! fn main() {
60//! let a = seq(1, 4, 1);
61//! a.print();
62//! // [1, 2, 3, 4]
63//! }
64//! ```
65//!
66//! ## `Vec<f64>` Operation
67//!
68//! There are two ways to do vector operations.
69//!
70//! * Use functional programming tools
71//! * Use redox
72//!
73//! Here, I explain second method - for functional programming, see below.
74//!
75//! To use redox, you only need to understand two things - `ox()`, `red()`.
76//!
77//! * `ox()` : Makes vector to `Redox<T: Vector>`
78//! * `red()` : Makes `Redox<T: Vector>` to vector.
79//!
80//! ```
81//! #[macro_use]
82//! extern crate peroxide;
83//! use peroxide::fuga::*;
84//!
85//! fn main() {
86//! let a = c!(1, 2, 3);
87//! assert_eq!((a.ox() * 2f64 - 1f64).red(), c!(1f64, 3f64, 5f64));
88//! }
89//! ```
90//!
91//! ## Concatenation
92//!
93//! There are two concatenation operations.
94//!
95//! * `cat(T, Vec<T>) -> Vec<f64>`
96//! * `concat(Vec<T>, Vec<T>) -> Vec<T>`
97//!
98//! ```rust
99//! #[macro_use]
100//! extern crate peroxide;
101//! use peroxide::fuga::*;
102//!
103//! fn main() {
104//! let a = c!(1,2,3,4);
105//! cat(0f64, &a).print();
106//! // [0, 1, 2, 3, 4]
107//!
108//! let b = c!(5,6,7,8);
109//! concat(&a, &b).print();
110//! // [1, 2, 3, 4, 5, 6, 7, 8]
111//! }
112//! ```
113//!
114//! ## Conversion to Matrix
115//!
//! `Vec<f64>` can be converted to `Matrix` as follows.
117//!
118//! * `into(self) -> Matrix` : `Vec<f64>` to column matrix
119//!
120//! ```rust
121//! #[macro_use]
122//! extern crate peroxide;
123//! use peroxide::fuga::*;
124//!
125//! fn main() {
126//! let a = c!(1,2,3,4);
127//! let a_col: Matrix = a.into();
128//! let m_col = matrix(c!(1,2,3,4), 4, 1, Col); // (4,1) Matrix
129//! assert_eq!(a_col, m_col);
130//!
131//! let m_row = matrix(c!(1,2,3,4), 1, 4, Row); // (1,4) Matrix
132//! assert_eq!(a_col.t(), m_row);
133//! }
134//! ```
135//!
136//! # Functional Programming {#functional}
137//!
138//! ## FP for `Vec<f64>`
139//!
140//! * There are some functional programming tools for `Vec<f64>`
141//!
142//! ### fmap
143//!
144//! * `fmap` is syntactic sugar for `map`
145//! * But different to original `map` - Only `f64 -> f64` allowed.
146//!
147//! ```rust
148//! #[macro_use]
149//! extern crate peroxide;
150//! use peroxide::fuga::*;
151//!
152//! fn main() {
153//! let a = c!(1,2,3,4);
154//!
155//! // Original rust
156//! a.clone()
157//! .into_iter()
158//! .map(|x| x + 1f64)
159//! .collect::<Vec<f64>>()
160//! .print();
161//! // [2, 3, 4, 5]
162//!
163//! // fmap in Peroxide
164//! a.fmap(|x| x + 1f64).print();
165//! // [2, 3, 4, 5]
166//! }
167//! ```
168//!
169//! ### reduce
170//!
171//! * `reduce` is syntactic sugar for `fold`
172//!
173//! ```rust
174//! #[macro_use]
175//! extern crate peroxide;
176//! use peroxide::fuga::*;
177//!
178//! fn main() {
179//! let a = c!(1,2,3,4);
180//!
181//! // Original rust
182//! a.clone()
183//! .into_iter()
184//! .fold(0f64, |x, y| x + y)
185//! .print(); // 10
186//!
187//! // reduce in Peroxide
188//! a.reduce(0f64, |x, y| x + y).print(); // 10
189//! }
190//! ```
191//!
192//! ### zip_with
193//!
194//! * `zip_with` is composed of `zip` & `map`
195//!
196//! ```rust
197//! #[macro_use]
198//! extern crate peroxide;
199//! use peroxide::fuga::*;
200//!
201//! fn main() {
202//! let a = c!(1,2,3,4);
203//! let b = c!(5,6,7,8);
204//!
205//! // Original rust
206//! a.clone()
207//! .into_iter()
208//! .zip(&b)
209//! .map(|(x, y)| x + *y)
210//! .collect::<Vec<f64>>().print();
211//! // [6, 8, 10, 12]
212//!
213//! // zip_with in Peroxide
214//! zip_with(|x, y| x + y, &a, &b).print();
215//! // [6, 8, 10, 12]
216//! }
217//! ```
218//!
219//! ### filter
220//!
//! * `filter` is syntactic sugar for the iterator adaptor `filter`, restricted to `f64`
222//!
223//! ```rust
224//! #[macro_use]
225//! extern crate peroxide;
226//! use peroxide::fuga::*;
227//!
228//! fn main() {
229//! let a = c!(1,2,3,4);
230//! a.filter(|x| x > 2f64).print();
231//! // [3, 4]
232//! }
233//! ```
234//!
235//! ### take & skip
236//!
//! * `take` is syntactic sugar for the iterator adaptor `take`
238//!
239//! ```rust
240//! #[macro_use]
241//! extern crate peroxide;
242//! use peroxide::fuga::*;
243//!
244//! fn main() {
245//! let a = c!(1,2,3,4);
246//! a.take(2).print();
247//! // [1, 2]
248//! }
249//! ```
250//!
//! * `skip` is syntactic sugar for the iterator adaptor `skip`
252//!
253//! ```rust
254//! #[macro_use]
255//! extern crate peroxide;
256//! use peroxide::fuga::*;
257//!
258//! fn main() {
259//! let a = c!(1,2,3,4);
260//! a.skip(2).print();
261//! // [3, 4]
262//! }
263//! ```
264
265#[cfg(feature = "O3")]
266extern crate blas;
267#[cfg(feature = "parallel")]
268use crate::rayon::iter::{
269 IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
270 IntoParallelRefMutIterator, ParallelIterator,
271};
272#[cfg(feature = "O3")]
273use blas::{daxpy, ddot, dnrm2, idamax};
274
275use crate::structure::matrix::{matrix, Matrix, Row};
276use crate::traits::{
277 fp::FPVector,
278 general::Algorithm,
279 math::{InnerProduct, LinearOp, Norm, Normed, Vector, VectorProduct},
280 mutable::MutFP,
281 pointer::{Oxide, Redox, RedoxCommon},
282};
283#[cfg(feature = "parallel")]
284use crate::traits::{
285 fp::ParallelFPVector,
286 math::{ParallelInnerProduct, ParallelNormed, ParallelVectorProduct},
287 mutable::ParallelMutFP,
288};
289use std::cmp::min;
290
291impl FPVector for Vec<f64> {
292 type Scalar = f64;
293
294 /// fmap for `Vec<f64>`
295 ///
296 /// # Examples
297 /// ```
298 /// #[macro_use]
299 /// extern crate peroxide;
300 /// use peroxide::fuga::*;
301 ///
302 /// fn main() {
303 /// let a = c!(1,2,3,4,5);
304 /// assert_eq!(a.fmap(|x| x*2f64), seq!(2,10,2));
305 /// }
306 /// ```
307 fn fmap<F>(&self, f: F) -> Vec<f64>
308 where
309 F: Fn(f64) -> f64,
310 {
311 let mut v = self.clone();
312 v.iter_mut().for_each(|x| *x = f(*x));
313 v
314 }
315
316 /// reduce for `Vec<f64>`
317 ///
318 /// # Examples
319 /// ```
320 /// #[macro_use]
321 /// extern crate peroxide;
322 /// use peroxide::fuga::*;
323 ///
324 /// fn main() {
325 /// let a = seq!(1,100,1);
326 /// assert_eq!(a.reduce(0, |x,y| x + y), 5050f64);
327 /// }
328 /// ```
329 fn reduce<F, T>(&self, init: T, f: F) -> f64
330 where
331 F: Fn(f64, f64) -> f64,
332 T: Into<f64> + Copy,
333 {
334 self.iter().fold(init.into(), |x, &y| f(x, y))
335 }
336
337 fn zip_with<F>(&self, f: F, other: &Vec<f64>) -> Vec<f64>
338 where
339 F: Fn(f64, f64) -> f64,
340 {
341 self.iter()
342 .zip(other)
343 .map(|(x, y)| f(*x, *y))
344 .collect::<Vec<f64>>()
345 }
346
347 /// Filter for `Vec<f64>`
348 ///
349 /// # Examples
350 /// ```
351 /// #[macro_use]
352 /// extern crate peroxide;
353 /// use peroxide::fuga::*;
354 ///
355 /// fn main() {
356 /// let a = c!(1,2,3,4,5);
357 /// let b = a.filter(|x| x > 3.);
358 /// assert_eq!(b, c!(4,5));
359 /// }
360 /// ```
361 fn filter<F>(&self, f: F) -> Vec<f64>
362 where
363 F: Fn(f64) -> bool,
364 {
365 self.clone()
366 .into_iter()
367 .filter(|x| f(*x))
368 .collect::<Vec<f64>>()
369 }
370
371 /// Take for `Vec<f64>`
372 ///
373 /// # Examples
374 /// ```
375 /// #[macro_use]
376 /// extern crate peroxide;
377 /// use peroxide::fuga::*;
378 ///
379 /// fn main() {
380 /// let a = c!(1,2,3,4,5);
381 /// let b = a.take(3);
382 /// assert_eq!(b, c!(1,2,3));
383 /// }
384 /// ```
385 fn take(&self, n: usize) -> Vec<f64> {
386 let mut v = vec![0f64; n];
387 v[..n].copy_from_slice(&self[..n]);
388 v
389 }
390
391 /// Skip for `Vec<f64>`
392 ///
393 /// # Examples
394 /// ```
395 /// #[macro_use]
396 /// extern crate peroxide;
397 /// use peroxide::fuga::*;
398 ///
399 /// fn main() {
400 /// let a = c!(1,2,3,4,5);
401 /// let b = a.skip(3);
402 /// assert_eq!(b, c!(4,5));
403 /// }
404 /// ```
405 fn skip(&self, n: usize) -> Vec<f64> {
406 let l = self.len();
407 let mut v = vec![0f64; l - n];
408 for (i, j) in (n..l).enumerate() {
409 v[i] = self[j];
410 }
411 v
412 }
413
414 fn sum(&self) -> f64 {
415 self.iter().sum()
416 }
417
418 fn prod(&self) -> f64 {
419 self.iter().product()
420 }
421}
422
#[cfg(feature = "parallel")]
impl ParallelFPVector for Vec<f64> {
    type Scalar = f64;

    /// par_fmap for `Vec<f64>`
    ///
    /// Parallel counterpart of `fmap`: clones once, then applies `f`
    /// elementwise via rayon's `par_iter_mut`.
    ///
    /// # Examples
    /// ```
    /// #[macro_use]
    /// extern crate peroxide;
    /// use peroxide::fuga::*;
    /// use peroxide::traits::fp::ParallelFPVector;
    ///
    /// fn main() {
    ///     let a = c!(1,2,3,4,5);
    ///     assert_eq!(a.par_fmap(|x| x*2f64), seq!(2,10,2));
    /// }
    /// ```
    fn par_fmap<F>(&self, f: F) -> Vec<f64>
    where
        F: Fn(f64) -> f64 + Sync + Send,
    {
        let mut v = self.clone();
        v.par_iter_mut().for_each(|x| *x = f(*x));
        v
    }

    /// reduce for `Vec<f64>`
    ///
    /// NOTE(review): `_init` is ignored — the reduction starts from the
    /// elements themselves (`reduce_with`), so the result matches the
    /// sequential `reduce` only when `f` is associative and `init` is an
    /// identity for `f` (e.g. `0` for `+`, as in the example below).
    /// Confirm no caller passes a non-identity `init` before relying on
    /// parity with the serial version.
    ///
    /// Panics (via `expect`) when the vector is empty.
    ///
    /// # Examples
    /// ```
    /// #[macro_use]
    /// extern crate peroxide;
    /// use peroxide::fuga::*;
    /// use peroxide::traits::fp::ParallelFPVector;
    ///
    /// fn main() {
    ///     let a = seq!(1,100,1);
    ///     assert_eq!(a.par_reduce(0, |x,y| x + y), 5050f64);
    /// }
    /// ```
    fn par_reduce<F, T>(&self, _init: T, f: F) -> f64
    where
        F: Fn(f64, f64) -> f64 + Send + Sync,
        T: Into<f64> + Send + Sync + Copy,
    {
        self.par_iter()
            .cloned()
            .reduce_with(|x, y| f(x, y))
            .expect("Unable to perform parallel reduce operation")
    }

    /// Parallel elementwise combination: `f(self[i], other[i])`.
    /// Like the serial `zip_with`, `zip` stops at the shorter operand.
    fn par_zip_with<F>(&self, f: F, other: &Vec<f64>) -> Vec<f64>
    where
        F: Fn(f64, f64) -> f64 + Send + Sync,
    {
        self.par_iter()
            .zip(other)
            .map(|(x, y)| f(*x, *y))
            .collect::<Vec<f64>>()
    }

    /// Filter for `Vec<f64>`
    ///
    /// Parallel counterpart of `filter`; rayon preserves the original
    /// element order when collecting.
    ///
    /// # Examples
    /// ```
    /// #[macro_use]
    /// extern crate peroxide;
    /// use peroxide::fuga::*;
    /// use peroxide::traits::fp::ParallelFPVector;
    ///
    /// fn main() {
    ///     let a = c!(1,2,3,4,5);
    ///     let b = a.par_filter(|x| x > 3.);
    ///     assert_eq!(b, c!(4,5));
    /// }
    /// ```
    fn par_filter<F>(&self, f: F) -> Vec<f64>
    where
        F: Fn(f64) -> bool + Send + Sync,
    {
        self.clone()
            .into_par_iter()
            .filter(|x| f(*x))
            .collect::<Vec<f64>>()
    }
}
510
511/// Explicit version of `map`
512pub fn map<F, T>(f: F, xs: &[T]) -> Vec<T>
513where
514 F: Fn(T) -> T,
515 T: Default + Copy,
516{
517 let l = xs.len();
518 let mut result = vec![T::default(); l];
519 for i in 0..l {
520 result[i] = f(xs[i]);
521 }
522 result
523}
524
525/// Explicit version of `map` in parallel
526#[cfg(feature = "parallel")]
527pub fn par_map<F, T>(f: F, xs: &[T]) -> Vec<T>
528where
529 F: Fn(T) -> T + Send + Sync,
530 T: Default + Copy + Send + Sync,
531{
532 let mut result = vec![T::default(); xs.len()];
533 result.par_iter_mut().for_each(|x| *x = f(*x));
534 result
535}
536
537/// Explicit version of `reduce`
538pub fn reduce<F, T>(f: F, init: T, xs: &[T]) -> T
539where
540 F: Fn(T, T) -> T,
541 T: Copy,
542{
543 let mut s = init;
544 for &x in xs {
545 s = f(s, x);
546 }
547 s
548}
549
550/// Explicit version of `zip_with`
551pub fn zip_with<F, T>(f: F, xs: &[T], ys: &[T]) -> Vec<T>
552where
553 F: Fn(T, T) -> T,
554 T: Default + Copy,
555{
556 let l = min(xs.len(), ys.len());
557 let mut result = vec![T::default(); l];
558 for i in 0..l {
559 result[i] = f(xs[i], ys[i]);
560 }
561 result
562}
563
564/// Explicit version of `zip_with` in parallel
565#[cfg(feature = "parallel")]
566pub fn par_zip_with<F, T>(f: F, xs: &[T], ys: &[T]) -> Vec<T>
567where
568 F: Fn(T, T) -> T + Send + Sync,
569 T: Default + Copy + Send + Sync,
570{
571 xs.par_iter()
572 .zip(ys.into_par_iter())
573 .map(|(x, y)| f(*x, *y))
574 .collect::<Vec<T>>()
575}
576
577impl MutFP for Vec<f64> {
578 type Scalar = f64;
579
580 fn mut_map<F>(&mut self, f: F)
581 where
582 F: Fn(Self::Scalar) -> Self::Scalar,
583 {
584 for x in self.iter_mut() {
585 *x = f(*x);
586 }
587 }
588
589 fn mut_zip_with<F>(&mut self, f: F, other: &Self)
590 where
591 F: Fn(Self::Scalar, Self::Scalar) -> Self::Scalar,
592 {
593 for i in 0..self.len() {
594 self[i] = f(self[i], other[i]);
595 }
596 }
597}
598
#[cfg(feature = "parallel")]
impl ParallelMutFP for Vec<f64> {
    type Scalar = f64;

    /// Parallel in-place elementwise map.
    fn par_mut_map<F>(&mut self, f: F)
    where
        F: Fn(Self::Scalar) -> Self::Scalar + Send + Sync,
    {
        self.par_iter_mut().for_each(|e| *e = f(*e));
    }

    /// Parallel in-place elementwise combine with `other`
    /// (rayon `zip` stops at the shorter side).
    fn par_mut_zip_with<F>(&mut self, f: F, other: &Self)
    where
        F: Fn(Self::Scalar, Self::Scalar) -> Self::Scalar + Send + Sync,
    {
        self.par_iter_mut()
            .zip(other.par_iter())
            .for_each(|(e, y)| *e = f(*e, *y));
    }
}
619
620impl Algorithm for Vec<f64> {
621 type Scalar = f64;
622
623 /// Assign rank
624 ///
625 /// # Examples
626 /// ```
627 /// #[macro_use]
628 /// extern crate peroxide;
629 /// use peroxide::fuga::*;
630 ///
631 /// fn main() {
632 /// let v = c!(7, 5, 9, 2, 8);
633 /// assert_eq!(v.rank(), vec![2,3,0,4,1]);
634 /// }
635 /// ```
636 fn rank(&self) -> Vec<usize> {
637 let l = self.len();
638 let idx = (1..(l + 1)).collect::<Vec<usize>>();
639
640 let mut vec_tup = self
641 .clone()
642 .into_iter()
643 .zip(idx.clone())
644 .collect::<Vec<(f64, usize)>>();
645 vec_tup.sort_by(|x, y| x.0.partial_cmp(&y.0).unwrap().reverse());
646 let indices = vec_tup.into_iter().map(|(_, y)| y).collect::<Vec<usize>>();
647 idx.into_iter()
648 .map(|x| indices.clone().into_iter().position(|t| t == x).unwrap())
649 .collect::<Vec<usize>>()
650 }
651
652 /// Sign of Permutation
653 ///
654 /// # Examples
655 /// ```
656 /// #[macro_use]
657 /// extern crate peroxide;
658 /// use peroxide::fuga::*;
659 ///
660 /// fn main() {
661 /// let a = c!(1,0,2);
662 /// let b = c!(1,2,0);
663 /// let c = c!(0,1,2);
664 ///
665 /// assert_eq!((a.sign(), b.sign(), c.sign()), (-1f64, 1f64, 1f64));
666 /// }
667 /// ```
668 fn sign(&self) -> f64 {
669 let l = self.len();
670 let mut v = self.clone();
671 let mut sgn = 1f64;
672
673 for i in 0..(l - 1) {
674 for j in 0..(l - 1 - i) {
675 if v[j] > v[j + 1] {
676 sgn *= -1f64;
677 let (a, b) = (v[j], v[j + 1]);
678 v[j] = b;
679 v[j + 1] = a;
680 }
681 }
682 }
683 sgn
684 }
685
686 /// arg max
687 ///
688 /// # Examples
689 /// ```
690 /// #[macro_use]
691 /// extern crate peroxide;
692 /// use peroxide::fuga::*;
693 ///
694 /// fn main() {
695 /// let v = c!(1,3,2,4,3,7);
696 /// assert_eq!(v.arg_max(),5);
697 ///
698 /// let v2 = c!(1,3,2,5,6,6);
699 /// assert_eq!(v2.arg_max(),4);
700 /// }
701 /// ```
702 fn arg_max(&self) -> usize {
703 #[cfg(feature = "O3")]
704 {
705 let n_i32 = self.len() as i32;
706 let idx: usize;
707 unsafe {
708 idx = idamax(n_i32, self, 1) - 1;
709 }
710 idx
711 }
712 #[cfg(not(feature = "O3"))]
713 {
714 self.into_iter()
715 .enumerate()
716 .fold((0usize, f64::MIN), |acc, (ics, &val)| {
717 if acc.1 < val {
718 (ics, val)
719 } else {
720 acc
721 }
722 })
723 .0
724 }
725 }
726
727 /// arg min
728 ///
729 /// # Examples
730 /// ```
731 /// #[macro_use]
732 /// extern crate peroxide;
733 /// use peroxide::fuga::*;
734 ///
735 /// fn main() {
736 /// let v = c!(1,3,2,4,3,7);
737 /// assert_eq!(v.arg_min(),0);
738 ///
739 /// let v2 = c!(4,3,2,5,1,6);
740 /// assert_eq!(v2.arg_min(),4);
741 /// }
742 fn arg_min(&self) -> usize {
743 self.iter()
744 .enumerate()
745 .fold((0usize, f64::MAX), |acc, (ics, &val)| {
746 if acc.1 > val {
747 (ics, val)
748 } else {
749 acc
750 }
751 })
752 .0
753 }
754
755 fn max(&self) -> f64 {
756 #[cfg(feature = "O3")]
757 {
758 let n_i32 = self.len() as i32;
759 let idx: usize;
760 unsafe {
761 idx = idamax(n_i32, self, 1) - 1;
762 }
763 self[idx]
764 }
765 #[cfg(not(feature = "O3"))]
766 {
767 self.into_iter()
768 .fold(f64::MIN, |acc, &val| if acc < val { val } else { acc })
769 }
770 }
771
772 fn min(&self) -> f64 {
773 self.iter()
774 .fold(f64::MAX, |acc, &val| if acc > val { val } else { acc })
775 }
776
777 fn swap_with_perm(&mut self, p: &Vec<(usize, usize)>) {
778 for (i, j) in p.iter() {
779 self.swap(*i, *j);
780 }
781 }
782}
783
784impl Vector for Vec<f64> {
785 type Scalar = f64;
786
787 fn add_vec(&self, rhs: &Self) -> Self {
788 self.zip_with(|x, y| x + y, rhs)
789 }
790
791 fn sub_vec(&self, rhs: &Self) -> Self {
792 self.zip_with(|x, y| x - y, rhs)
793 }
794
795 fn mul_scalar(&self, rhs: Self::Scalar) -> Self {
796 let alpha: f64 = rhs;
797 self.fmap(|x| x * alpha)
798 }
799}
800
801impl Normed for Vec<f64> {
802 type UnsignedScalar = f64;
803 fn norm(&self, kind: Norm) -> f64 {
804 match kind {
805 Norm::L1 => self.iter().map(|x| x.abs()).sum(),
806 Norm::L2 => {
807 #[cfg(feature = "O3")]
808 {
809 let n_i32 = self.len() as i32;
810 let res: f64;
811 unsafe {
812 res = dnrm2(n_i32, self, 1);
813 }
814 res
815 }
816 #[cfg(not(feature = "O3"))]
817 {
818 self.iter().map(|x| x.powi(2)).sum::<f64>().sqrt()
819 }
820 }
821 Norm::Lp(p) => {
822 assert!(
823 p >= 1f64,
824 "lp norm is only defined for p>=1, the given value was p={}",
825 p
826 );
827 self.iter().map(|x| x.powf(p)).sum::<f64>().powf(1f64 / p)
828 }
829 Norm::LInf => self.iter().fold(0f64, |x, y| x.max(y.abs())),
830 Norm::F => unimplemented!(),
831 Norm::Lpq(_, _) => unimplemented!(),
832 }
833 }
834 fn normalize(&self, kind: Norm) -> Self
835 where
836 Self: Sized,
837 {
838 let denom = self.norm(kind);
839 self.fmap(|x| x / denom)
840 }
841}
842
#[cfg(feature = "parallel")]
impl ParallelNormed for Vec<f64> {
    type UnsignedScalar = f64;

    /// Parallel norm of `Vec<f64>`.
    ///
    /// Same definitions as the serial `norm`; the `Lp` and non-`O3` `L2`
    /// branches use rayon (`L1` stays serial, as before).
    fn par_norm(&self, kind: Norm) -> f64 {
        match kind {
            Norm::L1 => self.iter().map(|x| x.abs()).sum(),
            Norm::L2 => {
                #[cfg(feature = "O3")]
                {
                    let n_i32 = self.len() as i32;
                    let res: f64;
                    unsafe {
                        res = dnrm2(n_i32, self, 1);
                    }
                    res
                }
                #[cfg(not(feature = "O3"))]
                {
                    self.par_iter()
                        .map(|x| x.powi(2))
                        .sum::<Self::UnsignedScalar>()
                        .sqrt()
                }
            }
            Norm::Lp(p) => {
                assert!(
                    p >= 1f64,
                    "lp norm is only defined for p>=1, the given value was p={}",
                    p
                );
                // Bug fix: use |x|^p. Without `abs`, `powf` returns NaN for
                // negative entries with non-integer p (same fix as `norm`).
                self.par_iter()
                    .map(|x| x.abs().powf(p))
                    .sum::<Self::UnsignedScalar>()
                    .powf(1_f64 / p)
            }
            Norm::LInf => self
                .par_iter()
                .fold(|| 0f64, |x, y| x.max(y.abs()))
                .reduce(|| 0f64, |acc1, acc2| acc1.max(acc2.abs())),
            Norm::F => unimplemented!(),
            Norm::Lpq(_, _) => unimplemented!(),
        }
    }
}
888
889impl InnerProduct for Vec<f64> {
890 fn dot(&self, rhs: &Self) -> f64 {
891 #[cfg(feature = "O3")]
892 {
893 let n_i32 = self.len() as i32;
894 let res: f64;
895 unsafe {
896 res = ddot(n_i32, self, 1, rhs, 1);
897 }
898 res
899 }
900 #[cfg(not(feature = "O3"))]
901 {
902 self.iter()
903 .zip(rhs.iter())
904 .fold(0f64, |x, (y1, y2)| x + y1 * y2)
905 }
906 }
907}
908
#[cfg(feature = "parallel")]
impl ParallelInnerProduct for Vec<f64> {
    /// Parallel dot product (BLAS `ddot` when `O3` is enabled).
    ///
    /// NOTE(review): in the non-`O3` branch the partial sums are formed per
    /// rayon split and then summed, so the floating-point accumulation order
    /// is partition-dependent — results may differ in the last bits from the
    /// serial `dot`.
    fn par_dot(&self, rhs: &Self) -> f64 {
        #[cfg(feature = "O3")]
        {
            let n_i32 = self.len() as i32;
            let res: f64;
            unsafe {
                res = ddot(n_i32, self, 1, rhs, 1);
            }
            res
        }
        #[cfg(not(feature = "O3"))]
        {
            // Per-split fold accumulates x + y1*y2; `sum` combines the
            // per-split partials.
            self.par_iter()
                .zip(rhs.into_par_iter())
                .fold(|| 0_f64, |x, (y1, y2)| x + y1 * y2)
                .sum::<f64>()
        }
    }
}
930
931impl LinearOp<Vec<f64>, f64> for Vec<f64> {
932 fn apply(&self, rhs: &Vec<f64>) -> f64 {
933 self.dot(rhs)
934 }
935}
936
937impl Oxide for Vec<f64> {
938 fn ox(self) -> Redox<Vec<f64>> {
939 Redox::<Vec<f64>>::from_vec(self)
940 }
941}
942
943impl VectorProduct for Vec<f64> {
944 fn cross(&self, other: &Self) -> Self {
945 assert_eq!(self.len(), other.len());
946 // 2D cross product is ill-defined
947 match self.len() {
948 2 => {
949 let mut v = vec![0f64; 1];
950 v[0] = self[0] * other[1] - self[1] * other[0];
951 v
952 }
953 3 => {
954 let mut v = vec![0f64; 3];
955 v[0] = self[1] * other[2] - self[2] * other[1];
956 v[1] = self[2] * other[0] - self[0] * other[2];
957 v[2] = self[0] * other[1] - self[1] * other[0];
958 v
959 }
960 _ => {
961 panic!("Cross product can be defined only in 2 or 3 dimension")
962 }
963 }
964 }
965
966 fn outer(&self, other: &Self) -> Matrix {
967 let m: Matrix = self.into();
968 let n = matrix(other.to_owned(), 1, other.len(), Row);
969 m * n
970 }
971}
972
#[cfg(feature = "parallel")]
impl ParallelVectorProduct for Vec<f64> {
    /// Parallel cross product; same contract as the serial `cross`
    /// (length 2 → 1-element z-component, length 3 → 3D cross product,
    /// anything else panics).
    fn par_cross(&self, other: &Self) -> Self {
        assert_eq!(self.len(), other.len());
        // 2D cross product is ill-defined
        match self.len() {
            2 => {
                let mut v = vec![0f64; 1];
                v[0] = self[0] * other[1] - self[1] * other[0];
                v
            }
            3 => {
                // Cyclic index formula: component i is
                // self[i+1]*other[i+2] - self[i+2]*other[i+1] (mod 3),
                // computed for the three components in parallel.
                let v = (0..3)
                    .into_par_iter()
                    .map(|index| {
                        self[(index + 1) % 3] * other[(index + 2) % 3]
                            - self[(index + 2) % 3] * other[(index + 1) % 3]
                    })
                    .collect::<Vec<f64>>();
                v
            }
            _ => {
                panic!("Cross product can be defined only in 2 or 3 dimension")
            }
        }
    }
}
999}
1000
1001// /// Convenient Vec<f64> Operations (No Clone, No Copy)
1002// impl VecOps for Vec<f64> {
1003// fn s_add(&self, scala: f64) -> Self {
1004// match () {
1005// #[cfg(feature = "O3")]
1006// () => {
1007// match self.len() {
1008// n if n % 8 == 0 => {
1009// let mut z = vec![0f64; n];
1010// self.chunks_exact(8)
1011// .map(f64x8::from_slice_unaligned)
1012// .map(|x| x + scala)
1013// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1014// z
1015// }
1016// n if n % 4 == 0 => {
1017// let mut z = vec![0f64; n];
1018// self.chunks_exact(4)
1019// .map(f64x4::from_slice_unaligned)
1020// .map(|x| x + scala)
1021// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1022// z
1023// }
1024// _ => self.fmap(|x| x + scala)
1025// }
1026// }
1027// _ => self.fmap(|x| x + scala),
1028// }
1029// }
1030//
1031// fn s_sub(&self, scala: f64) -> Self {
1032// match () {
1033// #[cfg(feature = "O3")]
1034// () => {
1035// match self.len() {
1036// n if n % 8 == 0 => {
1037// let mut z = vec![0f64; n];
1038// self.chunks_exact(8)
1039// .map(f64x8::from_slice_unaligned)
1040// .map(|x| x - scala)
1041// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1042// z
1043// }
1044// n if n % 4 == 0 => {
1045// let mut z = vec![0f64; n];
1046// self.chunks_exact(4)
1047// .map(f64x4::from_slice_unaligned)
1048// .map(|x| x - scala)
1049// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1050// z
1051// }
1052// _ => self.fmap(|x| x - scala)
1053// }
1054// }
1055// _ => self.fmap(|x| x - scala),
1056// }
1057// }
1058//
1059// fn s_mul(&self, scala: f64) -> Self {
1060// match () {
1061// #[cfg(feature = "O3")]
1062// () => {
1063// match self.len() {
1064// n if n % 8 == 0 => {
1065// let mut z = vec![0f64; n];
1066// self.chunks_exact(8)
1067// .map(f64x8::from_slice_unaligned)
1068// .map(|x| x * scala)
1069// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1070// z
1071// }
1072// n if n % 4 == 0 => {
1073// let mut z = vec![0f64; n];
1074// self.chunks_exact(4)
1075// .map(f64x4::from_slice_unaligned)
1076// .map(|x| x * scala)
1077// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1078// z
1079// }
1080// _ => self.fmap(|x| x * scala)
1081// }
1082// }
1083// _ => self.fmap(|x| scala * x),
1084// }
1085// }
1086//
1087// fn s_div(&self, scala: f64) -> Self {
1088// match () {
1089// #[cfg(feature = "O3")]
1090// () => {
1091// match self.len() {
1092// n if n % 8 == 0 => {
1093// let mut z = vec![0f64; n];
1094// self.chunks_exact(8)
1095// .map(f64x8::from_slice_unaligned)
1096// .map(|x| x / scala)
1097// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1098// z
1099// }
1100// n if n % 4 == 0 => {
1101// let mut z = vec![0f64; n];
1102// self.chunks_exact(4)
1103// .map(f64x4::from_slice_unaligned)
1104// .map(|x| x / scala)
1105// .for_each(|x| x.write_to_slice_unaligned(&mut z));
1106// z
1107// }
1108// _ => self.fmap(|x| x / scala)
1109// }
1110// }
1111// _ => self.fmap(|x| x / scala),
1112// }
1113// }
1114//
1115// /// Dot product
1116// fn dot(&self, other: &Self) -> f64 {
1117// match () {
1118// #[cfg(feature = "O3")]
1119// () => {
1120// let n_i32 = self.len() as i32;
1121// let res: f64;
1122// unsafe {
1123// res = ddot(n_i32, self, 1, other, 1);
1124// }
1125// res
1126// }
1127// _ => zip_with(|x, y| x * y, &self, other).reduce(0, |x, y| x + y),
1128// }
1129// }
1130//
1131// fn sum(&self) -> Self::Scalar {
1132// match () {
1133// #[cfg(feature = "O3")]
1134// () => {
1135// let n_i32 = self.len() as i32;
1136// let res: f64;
1137// unsafe {
1138// res = dasum(n_i32, self, 1);
1139// }
1140// res
1141// }
1142// _ => reduce(|x, y| x + y, 0f64, self)
1143// }
1144// }
1145
/// `y ← a·x + y` in place via BLAS `daxpy` (only built with the `O3` feature).
///
/// # Panics
/// Panics if `x` and `y` differ in length.
#[cfg(feature = "O3")]
pub fn blas_daxpy(a: f64, x: &[f64], y: &mut [f64]) {
    assert_eq!(x.len(), y.len());
    let n = x.len() as i32;
    // SAFETY: the assert above guarantees both slices have length `n`,
    // so the FFI call stays in bounds with stride 1.
    unsafe {
        daxpy(n, a, x, 1, y, 1);
    }
}
1154
/// Returns `a·x + y` as a new vector via BLAS `daxpy` (only with `O3`).
///
/// # Panics
/// Panics if `x` and `y` differ in length.
#[cfg(feature = "O3")]
pub fn blas_daxpy_return(a: f64, x: &[f64], y: &[f64]) -> Vec<f64> {
    assert_eq!(x.len(), y.len());
    let mut out = y.to_vec();
    // SAFETY: `out` is a fresh copy of `y`, so both buffers have the same
    // length as `x`; stride 1 keeps the call in bounds.
    unsafe {
        daxpy(x.len() as i32, a, x, 1, &mut out, 1);
    }
    out
}