CLAM-Development  1.4.0
FrameDescriptors.hxx
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2004 MUSIC TECHNOLOGY GROUP (MTG)
3  * UNIVERSITAT POMPEU FABRA
4  *
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  */
21 
22 #ifndef __FrameDescriptors__
23 #define __FrameDescriptors__
24 
#include "Descriptor.hxx"

#include "AudioDescriptors.hxx"
#include "SpectralDescriptors.hxx"
#include "SpectralPeakDescriptors.hxx"
#include "MorphologicalFrameDescriptors.hxx"

32 namespace CLAM{
33 
34 
35 class Frame;
36 
/*
 * This class holds Descriptors computed from Frame data. Currently no
 * descriptors are computed for the frame itself, only for the spectra,
 * audio chunks, etc. that are held inside the frame.
 * TODO: add frame-specific descriptors.
 */
44 {
45 public:
48  DYN_ATTRIBUTE (0, public, SpectralDescriptors, SpectrumD);
50  DYN_ATTRIBUTE (1, public, SpectralPeakDescriptors, SpectralPeakD);
52  DYN_ATTRIBUTE (2, public, SpectralDescriptors, ResidualSpecD);
54  DYN_ATTRIBUTE (3, public, SpectralDescriptors, SinusoidalSpecD);
55  /*Audio chunk that has been used for generating spectral data, will usually be
56  a previously windowed audio chunk*/
58  DYN_ATTRIBUTE (4, public, AudioDescriptors, AudioFrameD);
60  DYN_ATTRIBUTE (5, public, AudioDescriptors, SinusoidalAudioFrameD);
62  DYN_ATTRIBUTE (6, public, AudioDescriptors, ResidualAudioFrameD);
64  DYN_ATTRIBUTE (7, public, AudioDescriptors, SynthAudioFrameD);
65 
66 
68  DYN_ATTRIBUTE (8, public, MorphologicalFrameDescriptors, MorphologicalFrameD);
69 
71  DYN_ATTRIBUTE (9, public, TTime, CenterTime);
72 
73  //Note: some specific frame descriptors should be added
74 public:
75  FrameDescriptors(Frame* pFrame);
76  FrameDescriptors(TData initVal);
77 
78  const Frame* GetpFrame() const;
79  void SetpFrame(const Frame* pFrame);
80  void Compute();
81  void ConcreteCompute();
82 
83  TData GetFundamental() {return mpFrame->GetFundamental().GetFreq(0);}
84 
85 private:
86  void DefaultInit();
87  void CopyInit(const FrameDescriptors & copied);
88 
89 private:
90  const Frame* mpFrame;
91 
92 };
93 
94 FrameDescriptors operator * (const FrameDescriptors& a,const FrameDescriptors& b);
95 FrameDescriptors operator + (const FrameDescriptors& a,const FrameDescriptors& b);
96 FrameDescriptors operator * (const FrameDescriptors& a,TData mult);
97 FrameDescriptors operator * (TData mult,const FrameDescriptors& a);
98 FrameDescriptors operator - (const FrameDescriptors& a,const FrameDescriptors& b);
99 FrameDescriptors operator / (const FrameDescriptors& a,TData div);
100 
101 template<>
103 {
104  FrameDescriptors tmpD(a);
105  if(a.HasSpectralPeakD() && b.HasSpectralPeakD())
106  {
107  tmpD.SetSpectralPeakD(CLAM_max(a.GetSpectralPeakD(),b.GetSpectralPeakD()));
108  }
109  if(a.HasSpectrumD() && b.HasSpectrumD())
110  {
111  tmpD.SetSpectrumD(CLAM_max(a.GetSpectrumD(),b.GetSpectrumD()));
112  }
113  if(a.HasResidualSpecD() && b.HasResidualSpecD())
114  {
115  tmpD.SetResidualSpecD(CLAM_max(a.GetResidualSpecD(),b.GetResidualSpecD()));
116  }
117  if(a.HasSinusoidalSpecD() && b.HasSinusoidalSpecD())
118  {
119  tmpD.SetSinusoidalSpecD(CLAM_max(a.GetSinusoidalSpecD(),b.GetSinusoidalSpecD()));
120  }
121  if(a.HasAudioFrameD() && b.HasAudioFrameD())
122  {
123  tmpD.SetAudioFrameD(CLAM_max(a.GetAudioFrameD(),b.GetAudioFrameD()));
124  }
125  if(a.HasSinusoidalAudioFrameD() && b.HasSinusoidalAudioFrameD())
126  {
127  tmpD.SetSinusoidalAudioFrameD(CLAM_max(a.GetSinusoidalAudioFrameD(),b.GetSinusoidalAudioFrameD()));
128  }
129  if(a.HasResidualAudioFrameD() && b.HasResidualAudioFrameD())
130  {
131  tmpD.SetResidualAudioFrameD(CLAM_max(a.GetResidualAudioFrameD(),b.GetResidualAudioFrameD()));
132  }
133  if(a.HasSynthAudioFrameD() && b.HasSynthAudioFrameD())
134  {
135  tmpD.SetSynthAudioFrameD(CLAM_max(a.GetSynthAudioFrameD(),b.GetSynthAudioFrameD()));
136  }
137  return tmpD;
138 }
139 
140 template<>
142 {
143  FrameDescriptors tmpD(a);
144  if(a.HasSpectralPeakD() && b.HasSpectralPeakD())
145  {
146  tmpD.SetSpectralPeakD(CLAM_min(a.GetSpectralPeakD(),b.GetSpectralPeakD()));
147  }
148  if(a.HasSpectrumD() && b.HasSpectrumD())
149  {
150  tmpD.SetSpectrumD(CLAM_min(a.GetSpectrumD(),b.GetSpectrumD()));
151  }
152  if(a.HasResidualSpecD() && b.HasResidualSpecD())
153  {
154  tmpD.SetResidualSpecD(CLAM_min(a.GetResidualSpecD(),b.GetResidualSpecD()));
155  }
156  if(a.HasSinusoidalSpecD() && b.HasSinusoidalSpecD())
157  {
158  tmpD.SetSinusoidalSpecD(CLAM_min(a.GetSinusoidalSpecD(),b.GetSinusoidalSpecD()));
159  }
160  if(a.HasAudioFrameD() && b.HasAudioFrameD())
161  {
162  tmpD.SetAudioFrameD(CLAM_min(a.GetAudioFrameD(),b.GetAudioFrameD()));
163  }
164  if(a.HasSinusoidalAudioFrameD() && b.HasSinusoidalAudioFrameD())
165  {
166  tmpD.SetSinusoidalAudioFrameD(CLAM_min(a.GetSinusoidalAudioFrameD(),b.GetSinusoidalAudioFrameD()));
167  }
168  if(a.HasResidualAudioFrameD() && b.HasResidualAudioFrameD())
169  {
170  tmpD.SetResidualAudioFrameD(CLAM_min(a.GetResidualAudioFrameD(),b.GetResidualAudioFrameD()));
171  }
172  if(a.HasSynthAudioFrameD() && b.HasSynthAudioFrameD())
173  {
174  tmpD.SetSynthAudioFrameD(CLAM_min(a.GetSynthAudioFrameD(),b.GetSynthAudioFrameD()));
175  }
176  return tmpD;
177 }
178 
179 
180 
181 
182 };//CLAM
183 
184 
185 
186 #endif
187