author     Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-08 14:30:41 +0200
committer  Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-12 13:49:54 +0200
commit     ab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree       498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/third_party/WebKit/Source/modules/webaudio
parent     4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
Update Chromium to beta version 37.0.2062.68

Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca
Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/third_party/WebKit/Source/modules/webaudio')
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.cpp | 45
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.h | 23
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.idl | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.cpp | 18
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.h | 5
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicInspectorNode.h | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.cpp | 6
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.h | 15
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.cpp | 99
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.h | 24
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.idl | 5
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.cpp | 75
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.h | 28
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.idl | 24
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp | 333
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.h | 111
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.idl | 15
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h | 18
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.cpp | 96
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.h | 45
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.idl | 1
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.cpp | 41
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.h | 41
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.idl | 35
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeInput.h | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.cpp | 5
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.h | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.cpp | 4
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.h | 13
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.idl | 5
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.cpp | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.h | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.idl | 1
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.cpp | 29
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.h | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioSourceNode.h | 4
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/AudioSummingJunction.h | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.cpp | 134
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.h | 19
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.cpp | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.h | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.idl | 23
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.cpp | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.h | 14
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.cpp | 31
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.h | 9
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.cpp | 10
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.h | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.cpp | 25
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.h | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.idl | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DOMWindowWebAudio.h | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.cpp | 25
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.h | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.cpp | 5
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.h | 2
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.cpp | 3
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.h | 6
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DelayProcessor.h | 6
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.cpp | 16
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.h | 27
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/GainNode.cpp | 12
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/GainNode.h | 15
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.cpp | 18
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.h | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.cpp | 19
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.h | 9
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.cpp | 12
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.h | 22
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OWNERS | 1
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.cpp | 18
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.h | 16
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.cpp | 34
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.h | 4
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.idl | 1
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.cpp | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.h | 13
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.cpp | 27
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.h | 18
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.idl | 25
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.cpp | 332
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.h | 113
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.idl | 25
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.cpp | 22
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.h | 17
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.idl | 1
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.cpp | 41
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.h | 17
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.cpp | 52
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.h | 20
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.cpp | 42
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.h | 6
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.cpp | 7
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.h | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.idl | 8
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperProcessor.h | 9
-rw-r--r--  chromium/third_party/WebKit/Source/modules/webaudio/WindowWebAudio.idl | 12
100 files changed, 1486 insertions(+), 1233 deletions(-)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.cpp
index 83e6dba02ef..53cdd441f57 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.cpp
@@ -69,70 +69,47 @@ void AnalyserNode::process(size_t framesToProcess)
outputBus->copyFrom(*inputBus);
}
-void AnalyserNode::reset()
-{
- m_analyser.reset();
-}
-
void AnalyserNode::setFftSize(unsigned size, ExceptionState& exceptionState)
{
if (!m_analyser.setFftSize(size)) {
exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToSet(
- "fftSize",
- "AnalyserNode",
- "FFT size (" + String::number(size)
- + ") must be a power of two between "
- + String::number(RealtimeAnalyser::MinFFTSize) + " and "
- + String::number(RealtimeAnalyser::MaxFFTSize) + ", inclusive"));
+ (size < RealtimeAnalyser::MinFFTSize || size > RealtimeAnalyser::MaxFFTSize) ?
+ ExceptionMessages::indexOutsideRange("FFT size", size, RealtimeAnalyser::MinFFTSize, ExceptionMessages::InclusiveBound, RealtimeAnalyser::MaxFFTSize, ExceptionMessages::InclusiveBound)
+ : ("The value provided (" + String::number(size) + ") is not a power of two."));
}
}
-void AnalyserNode::setMinDecibels(float k, ExceptionState& exceptionState)
+void AnalyserNode::setMinDecibels(double k, ExceptionState& exceptionState)
{
- if (k <= maxDecibels()) {
+ if (k < maxDecibels()) {
m_analyser.setMinDecibels(k);
} else {
exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToSet(
- "minDecibels",
- "AnalyserNode",
- "minDecibels (" + String::number(k)
- + ") must be less than or equal maxDecibels (" + String::number(maxDecibels())
- + ")."));
+ ExceptionMessages::indexExceedsMaximumBound("minDecibels", k, maxDecibels()));
}
}
-void AnalyserNode::setMaxDecibels(float k, ExceptionState& exceptionState)
+void AnalyserNode::setMaxDecibels(double k, ExceptionState& exceptionState)
{
- if (k >= minDecibels()) {
+ if (k > minDecibels()) {
m_analyser.setMaxDecibels(k);
} else {
exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToSet(
- "maxDecibels",
- "AnalyserNode",
- "maxDecibels (" + String::number(k)
- + ") must be greater than or equal minDecibels (" + String::number(minDecibels())
- + ")."));
+ ExceptionMessages::indexExceedsMinimumBound("maxDecibels", k, minDecibels()));
}
}
-void AnalyserNode::setSmoothingTimeConstant(float k, ExceptionState& exceptionState)
+void AnalyserNode::setSmoothingTimeConstant(double k, ExceptionState& exceptionState)
{
if (k >= 0 && k <= 1) {
m_analyser.setSmoothingTimeConstant(k);
} else {
exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToSet(
- "smoothingTimeConstant",
- "AnalyserNode",
- "smoothing value (" + String::number(k)
- + ") must be between 0 and 1, inclusive."));
+ ExceptionMessages::indexOutsideRange("smoothing value", k, 0.0, ExceptionMessages::InclusiveBound, 1.0, ExceptionMessages::InclusiveBound));
}
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.h
index db2dacb7c83..fba97158f67 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.h
@@ -33,18 +33,17 @@ namespace WebCore {
class ExceptionState;
-class AnalyserNode : public AudioBasicInspectorNode {
+class AnalyserNode FINAL : public AudioBasicInspectorNode {
public:
- static PassRefPtr<AnalyserNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<AnalyserNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new AnalyserNode(context, sampleRate));
+ return adoptRefWillBeNoop(new AnalyserNode(context, sampleRate));
}
virtual ~AnalyserNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// Javascript bindings
unsigned fftSize() const { return m_analyser.fftSize(); }
@@ -52,19 +51,19 @@ public:
unsigned frequencyBinCount() const { return m_analyser.frequencyBinCount(); }
- void setMinDecibels(float k, ExceptionState&);
- float minDecibels() const { return m_analyser.minDecibels(); }
+ void setMinDecibels(double k, ExceptionState&);
+ double minDecibels() const { return m_analyser.minDecibels(); }
- void setMaxDecibels(float k, ExceptionState&);
- float maxDecibels() const { return m_analyser.maxDecibels(); }
+ void setMaxDecibels(double k, ExceptionState&);
+ double maxDecibels() const { return m_analyser.maxDecibels(); }
- void setSmoothingTimeConstant(float k, ExceptionState&);
- float smoothingTimeConstant() const { return m_analyser.smoothingTimeConstant(); }
+ void setSmoothingTimeConstant(double k, ExceptionState&);
+ double smoothingTimeConstant() const { return m_analyser.smoothingTimeConstant(); }
void getFloatFrequencyData(Float32Array* array) { m_analyser.getFloatFrequencyData(array); }
void getByteFrequencyData(Uint8Array* array) { m_analyser.getByteFrequencyData(array); }
+ void getFloatTimeDomainData(Float32Array* array) { m_analyser.getFloatTimeDomainData(array); }
void getByteTimeDomainData(Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
-
private:
virtual double tailTime() const OVERRIDE { return 0; }
virtual double latencyTime() const OVERRIDE { return 0; }
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.idl
index f3add662997..4de5f299edc 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AnalyserNode.idl
@@ -29,11 +29,11 @@
readonly attribute unsigned long frequencyBinCount;
// minDecibels / maxDecibels represent the range to scale the FFT analysis data for conversion to unsigned byte values.
- [RaisesException=Setter] attribute float minDecibels;
- [RaisesException=Setter] attribute float maxDecibels;
+ [RaisesException=Setter] attribute double minDecibels;
+ [RaisesException=Setter] attribute double maxDecibels;
// A value from 0.0 -> 1.0 where 0.0 represents no time averaging with the last analysis frame.
- [RaisesException=Setter] attribute float smoothingTimeConstant;
+ [RaisesException=Setter] attribute double smoothingTimeConstant;
// Copies the current frequency data into the passed array.
// If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped.
@@ -41,5 +41,6 @@
void getByteFrequencyData(Uint8Array array);
// Real-time waveform data
+ void getFloatTimeDomainData(Float32Array array);
void getByteTimeDomainData(Uint8Array array);
};
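A minimal TypeScript sketch of the AnalyserNode surface after this change (illustrative, not part of the patch): the decibel and smoothing attributes are now IDL doubles, and getFloatTimeDomainData() is newly exposed.

    const ctx = new AudioContext();
    const analyser = ctx.createAnalyser();

    // fftSize must be a power of two within [MinFFTSize, MaxFFTSize];
    // anything else now raises IndexSizeError with the messages built above.
    analyser.fftSize = 2048;

    // Now doubles; minDecibels must stay strictly below maxDecibels,
    // otherwise the setter throws IndexSizeError.
    analyser.minDecibels = -90;
    analyser.maxDecibels = -10;
    analyser.smoothingTimeConstant = 0.8; // must lie in [0, 1]

    // Newly added alongside the existing byte-valued variant.
    const timeData = new Float32Array(analyser.fftSize);
    analyser.getFloatTimeDomainData(timeData);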
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.cpp
index f1ea34f4c71..db7cedaaf62 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.cpp
@@ -31,6 +31,8 @@
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioBufferCallback.h"
#include "platform/Task.h"
+#include "platform/audio/AudioBus.h"
+#include "platform/audio/AudioFileReader.h"
#include "public/platform/Platform.h"
#include "wtf/ArrayBuffer.h"
#include "wtf/MainThread.h"
@@ -63,26 +65,26 @@ void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, Pa
void AsyncAudioDecoder::decode(ArrayBuffer* audioData, float sampleRate, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback)
{
- // Do the actual decoding and invoke the callback.
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(audioData->data(), audioData->byteLength(), false, sampleRate);
+ RefPtr<AudioBus> bus = createBusFromInMemoryAudioFile(audioData->data(), audioData->byteLength(), false, sampleRate);
// Decoding is finished, but we need to do the callbacks on the main thread.
// The leaked reference to audioBuffer is picked up in notifyComplete.
- callOnMainThread(WTF::bind(&AsyncAudioDecoder::notifyComplete, audioData, successCallback, errorCallback, audioBuffer.release().leakRef()));
+ callOnMainThread(WTF::bind(&AsyncAudioDecoder::notifyComplete, audioData, successCallback, errorCallback, bus.release().leakRef()));
}
-void AsyncAudioDecoder::notifyComplete(ArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, AudioBuffer* audioBuffer)
+void AsyncAudioDecoder::notifyComplete(ArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, AudioBus* audioBus)
{
// Adopt references, so everything gets correctly dereffed.
RefPtr<ArrayBuffer> audioDataRef = adoptRef(audioData);
OwnPtr<AudioBufferCallback> successCallbackPtr = adoptPtr(successCallback);
OwnPtr<AudioBufferCallback> errorCallbackPtr = adoptPtr(errorCallback);
- RefPtr<AudioBuffer> audioBufferRef = adoptRef(audioBuffer);
+ RefPtr<AudioBus> audioBusRef = adoptRef(audioBus);
- if (audioBuffer && successCallback)
- successCallback->handleEvent(audioBuffer);
+ RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioBus(audioBus);
+ if (audioBuffer.get() && successCallback)
+ successCallback->handleEvent(audioBuffer.get());
else if (errorCallback)
- errorCallback->handleEvent(audioBuffer);
+ errorCallback->handleEvent(audioBuffer.get());
}
} // namespace WebCore
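The decoder change above is internal: decoding on the worker thread now yields an AudioBus, and the AudioBuffer is only constructed in notifyComplete() on the main thread. The script-visible contract stays the same; a hedged sketch of it in TypeScript, with illustrative names:

    declare const ctx: AudioContext;
    declare const encoded: ArrayBuffer; // e.g. a fetched audio file

    // Decoding runs off the main thread; both callbacks fire back on it.
    ctx.decodeAudioData(encoded,
        (decoded) => console.log('decoded', decoded.duration, 'seconds'),
        () => console.error('invalid audio data'));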
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.h b/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.h
index a615787bd72..7f774564635 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AsyncAudioDecoder.h
@@ -25,6 +25,7 @@
#ifndef AsyncAudioDecoder_h
#define AsyncAudioDecoder_h
+#include "platform/heap/Handle.h"
#include "public/platform/WebThread.h"
#include "wtf/Forward.h"
#include "wtf/OwnPtr.h"
@@ -33,6 +34,7 @@ namespace WebCore {
class AudioBuffer;
class AudioBufferCallback;
+class AudioBus;
// AsyncAudioDecoder asynchronously decodes audio file data from an ArrayBuffer in a worker thread.
// Upon successful decoding, a completion callback will be invoked with the decoded PCM data in an AudioBuffer.
@@ -47,8 +49,9 @@ public:
void decodeAsync(ArrayBuffer* audioData, float sampleRate, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback);
private:
+ PassRefPtrWillBeRawPtr<AudioBuffer> createAudioBufferFromAudioBus(AudioBus*);
static void decode(ArrayBuffer* audioData, float sampleRate, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback);
- static void notifyComplete(ArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, AudioBuffer*);
+ static void notifyComplete(ArrayBuffer* audioData, AudioBufferCallback* successCallback, AudioBufferCallback* errorCallback, AudioBus*);
OwnPtr<blink::WebThread> m_thread;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicInspectorNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicInspectorNode.h
index 44d03cb103f..1e7cf824eb7 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicInspectorNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicInspectorNode.h
@@ -39,10 +39,10 @@ public:
AudioBasicInspectorNode(AudioContext*, float sampleRate, unsigned outputChannelCount);
// AudioNode
- virtual void pullInputs(size_t framesToProcess);
- virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&);
- virtual void disconnect(unsigned outputIndex, ExceptionState&);
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
+ virtual void pullInputs(size_t framesToProcess) OVERRIDE FINAL;
+ virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&) OVERRIDE FINAL;
+ virtual void disconnect(unsigned outputIndex, ExceptionState&) OVERRIDE FINAL;
+ virtual void checkNumberOfChannelsForInput(AudioNodeInput*) OVERRIDE FINAL;
private:
void updatePullStatus();
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.cpp
index 6428bae8899..8a5330e2f64 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.cpp
@@ -91,12 +91,6 @@ void AudioBasicProcessorNode::pullInputs(size_t framesToProcess)
input(0)->pull(output(0)->bus(), framesToProcess);
}
-void AudioBasicProcessorNode::reset()
-{
- if (processor())
- processor()->reset();
-}
-
// As soon as we know the channel count of our input, we can lazily initialize.
// Sometimes this may be called more than once with different channel counts, in which case we must safely
// uninitialize and then re-initialize with the new channel count.
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.h
index eb94cac2221..ad37fb6e3bd 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBasicProcessorNode.h
@@ -42,21 +42,20 @@ public:
AudioBasicProcessorNode(AudioContext*, float sampleRate);
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void pullInputs(size_t framesToProcess);
- virtual void reset();
- virtual void initialize();
- virtual void uninitialize();
+ virtual void process(size_t framesToProcess) OVERRIDE FINAL;
+ virtual void pullInputs(size_t framesToProcess) OVERRIDE FINAL;
+ virtual void initialize() OVERRIDE FINAL;
+ virtual void uninitialize() OVERRIDE FINAL;
// Called in the main thread when the number of channels for the input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
+ virtual void checkNumberOfChannelsForInput(AudioNodeInput*) OVERRIDE FINAL;
// Returns the number of channels for both the input and the output.
unsigned numberOfChannels();
protected:
- virtual double tailTime() const OVERRIDE;
- virtual double latencyTime() const OVERRIDE;
+ virtual double tailTime() const OVERRIDE FINAL;
+ virtual double latencyTime() const OVERRIDE FINAL;
AudioProcessor* processor() { return m_processor.get(); }
OwnPtr<AudioProcessor> m_processor;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.cpp
index b520512e054..d473aae6cc4 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.cpp
@@ -32,6 +32,7 @@
#include "modules/webaudio/AudioBuffer.h"
+#include "bindings/v8/ExceptionMessages.h"
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
#include "platform/audio/AudioBus.h"
@@ -42,36 +43,101 @@ namespace WebCore {
float AudioBuffer::minAllowedSampleRate()
{
- return 22050;
+ // crbug.com/344375
+ return 3000;
}
float AudioBuffer::maxAllowedSampleRate()
{
- return 96000;
+ // Windows can support up to this rate.
+ return 192000;
}
-PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
{
- if (sampleRate < minAllowedSampleRate() || sampleRate > maxAllowedSampleRate() || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfFrames)
- return 0;
+ if (sampleRate < minAllowedSampleRate() || sampleRate > maxAllowedSampleRate() || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfChannels || !numberOfFrames)
+ return nullptr;
- RefPtr<AudioBuffer> buffer = adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
+ RefPtrWillBeRawPtr<AudioBuffer> buffer = adoptRefWillBeNoop(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
if (!buffer->createdSuccessfully(numberOfChannels))
- return 0;
+ return nullptr;
return buffer;
}
-PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+{
+ if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels()) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ ExceptionMessages::indexOutsideRange(
+ "number of channels",
+ numberOfChannels,
+ 1u,
+ ExceptionMessages::InclusiveBound,
+ AudioContext::maxNumberOfChannels(),
+ ExceptionMessages::InclusiveBound));
+ return nullptr;
+ }
+
+ if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ ExceptionMessages::indexOutsideRange(
+ "sample rate",
+ sampleRate,
+ AudioBuffer::minAllowedSampleRate(),
+ ExceptionMessages::InclusiveBound,
+ AudioBuffer::maxAllowedSampleRate(),
+ ExceptionMessages::InclusiveBound));
+ return nullptr;
+ }
+
+ if (!numberOfFrames) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ ExceptionMessages::indexExceedsMinimumBound(
+ "number of frames",
+ numberOfFrames,
+ static_cast<size_t>(0)));
+ return nullptr;
+ }
+
+ RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = create(numberOfChannels, numberOfFrames, sampleRate);
+
+ if (!audioBuffer.get()) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ "createBuffer("
+ + String::number(numberOfChannels) + ", "
+ + String::number(numberOfFrames) + ", "
+ + String::number(sampleRate)
+ + ") failed.");
+ }
+
+ return audioBuffer;
+}
+
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
RefPtr<AudioBus> bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate);
if (bus.get()) {
- RefPtr<AudioBuffer> buffer = adoptRef(new AudioBuffer(bus.get()));
+ RefPtrWillBeRawPtr<AudioBuffer> buffer = adoptRefWillBeNoop(new AudioBuffer(bus.get()));
if (buffer->createdSuccessfully(bus->numberOfChannels()))
return buffer;
}
- return 0;
+ return nullptr;
+}
+
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioBuffer::createFromAudioBus(AudioBus* bus)
+{
+ if (!bus)
+ return nullptr;
+ RefPtrWillBeRawPtr<AudioBuffer> buffer = adoptRefWillBeNoop(new AudioBuffer(bus));
+ if (buffer->createdSuccessfully(bus->numberOfChannels()))
+ return buffer;
+ return nullptr;
}
bool AudioBuffer::createdSuccessfully(unsigned desiredNumberOfChannels) const
@@ -80,8 +146,7 @@ bool AudioBuffer::createdSuccessfully(unsigned desiredNumberOfChannels) const
}
AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
- : m_gain(1.0)
- , m_sampleRate(sampleRate)
+ : m_sampleRate(sampleRate)
, m_length(numberOfFrames)
{
ScriptWrappable::init(this);
@@ -101,8 +166,7 @@ AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float
}
AudioBuffer::AudioBuffer(AudioBus* bus)
- : m_gain(1.0)
- , m_sampleRate(bus->sampleRate())
+ : m_sampleRate(bus->sampleRate())
, m_length(bus->length())
{
ScriptWrappable::init(this);
@@ -122,16 +186,11 @@ AudioBuffer::AudioBuffer(AudioBus* bus)
}
}
-void AudioBuffer::releaseMemory()
-{
- m_channels.clear();
-}
-
PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionState& exceptionState)
{
if (channelIndex >= m_channels.size()) {
exceptionState.throwDOMException(IndexSizeError, "channel index (" + String::number(channelIndex) + ") exceeds number of channels (" + String::number(m_channels.size()) + ")");
- return 0;
+ return nullptr;
}
Float32Array* channelData = m_channels[channelIndex].get();
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.h
index 8ffb2aceb11..cd0f87958bb 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.h
@@ -41,16 +41,19 @@ namespace WebCore {
class AudioBus;
class ExceptionState;
-class AudioBuffer : public ScriptWrappable, public RefCounted<AudioBuffer> {
+class AudioBuffer : public RefCountedWillBeGarbageCollectedFinalized<AudioBuffer>, public ScriptWrappable {
public:
- static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+ static PassRefPtrWillBeRawPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+ static PassRefPtrWillBeRawPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
// Returns 0 if data is not a valid audio file.
- static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
+ static PassRefPtrWillBeRawPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
+
+ static PassRefPtrWillBeRawPtr<AudioBuffer> createFromAudioBus(AudioBus*);
// Format
size_t length() const { return m_length; }
- double duration() const { return length() / sampleRate(); }
+ double duration() const { return length() / static_cast<double>(sampleRate()); }
float sampleRate() const { return m_sampleRate; }
// Channel data access
@@ -59,23 +62,16 @@ public:
Float32Array* getChannelData(unsigned channelIndex);
void zero();
- // Scalar gain
- double gain() const { return m_gain; }
- void setGain(double gain) { m_gain = gain; }
-
- // Because an AudioBuffer has a JavaScript wrapper, which will be garbage collected, it may take awhile for this object to be deleted.
- // releaseMemory() can be called when the AudioContext goes away, so we can release the memory earlier than when the garbage collection happens.
- // Careful! Only call this when the page unloads, after the AudioContext is no longer processing.
- void releaseMemory();
-
static float minAllowedSampleRate();
static float maxAllowedSampleRate();
+
+ void trace(Visitor*) { }
+
protected:
AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
explicit AudioBuffer(AudioBus*);
bool createdSuccessfully(unsigned desiredNumberOfChannels) const;
- double m_gain; // scalar gain
float m_sampleRate;
size_t m_length;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.idl
index 19f66a5f2a0..7ec7c60bb4c 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBuffer.idl
@@ -27,14 +27,13 @@
*/
[
+ WillBeGarbageCollected,
Conditional=WEB_AUDIO
] interface AudioBuffer {
readonly attribute long length; // in sample-frames
- readonly attribute float duration; // in seconds
+ readonly attribute double duration; // in seconds
readonly attribute float sampleRate; // in sample-frames per second
- attribute float gain; // linear gain (default 1.0)
-
// Channel access
readonly attribute unsigned long numberOfChannels;
[RaisesException] Float32Array getChannelData(unsigned long channelIndex);
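Taken together, the AudioBuffer changes above tighten createBuffer() validation (each bad argument now throws NotSupportedError with a specific message), widen the allowed sample-rate range to 3000-192000 Hz, make duration a double computed as length / sampleRate, and drop the non-standard gain attribute. An illustrative TypeScript sketch, not taken from the patch:

    const ctx = new AudioContext();

    // Valid: channel count in [1, maxNumberOfChannels], frames > 0,
    // sample rate within [3000, 192000] Hz (previously 22050-96000).
    const buffer = ctx.createBuffer(2, ctx.sampleRate, ctx.sampleRate);
    console.log(buffer.duration); // 1.0

    // Each of these now throws NotSupportedError instead of returning null:
    // ctx.createBuffer(0, 1024, 44100);  // zero channels
    // ctx.createBuffer(2, 0, 44100);     // zero frames
    // ctx.createBuffer(2, 1024, 1000);   // sample rate below the minimum

    // AudioBuffer.gain is gone; use a GainNode for gain instead.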
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.cpp
index 07a63a57bc1..c2dbc32bf24 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.cpp
@@ -30,7 +30,6 @@
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
-#include "core/page/PageConsole.h"
#include "platform/audio/AudioUtilities.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeOutput.h"
@@ -50,14 +49,14 @@ const double DefaultGrainDuration = 0.020; // 20ms
// to minimize linear interpolation aliasing.
const double MaxRate = 1024;
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
+PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
{
- return adoptRef(new AudioBufferSourceNode(context, sampleRate));
+ return adoptRefWillBeNoop(new AudioBufferSourceNode(context, sampleRate));
}
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
: AudioScheduledSourceNode(context, sampleRate)
- , m_buffer(0)
+ , m_buffer(nullptr)
, m_isLooping(false)
, m_loopStart(0)
, m_loopEnd(0)
@@ -65,13 +64,11 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sample
, m_isGrain(false)
, m_grainOffset(0.0)
, m_grainDuration(DefaultGrainDuration)
- , m_lastGain(1.0)
, m_pannerNode(0)
{
ScriptWrappable::init(this);
setNodeType(NodeTypeAudioBufferSource);
- m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, 0.0, MaxRate);
// Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
@@ -133,9 +130,6 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
return;
}
- // Apply the gain (in-place) to the output bus.
- float totalGain = gain()->value() * m_buffer->gain();
- outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
outputBus->clearSilentFlag();
} else {
// Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
@@ -331,21 +325,9 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
}
-void AudioBufferSourceNode::reset()
-{
- m_virtualReadIndex = 0;
- m_lastGain = gain()->value();
-}
-
void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- // FIXME: It does not look like we should throw if the buffer is null as
- // the attribute is nullable in the specification.
- if (!buffer) {
- exceptionState.throwTypeError("buffer cannot be null");
- return;
- }
// The context must be locked since changing the buffer can re-configure the number of channels that are output.
AudioContext::AutoLocker contextLocker(context());
@@ -382,28 +364,18 @@ unsigned AudioBufferSourceNode::numberOfChannels()
return output(0)->numberOfChannels();
}
-void AudioBufferSourceNode::start(ExceptionState& exceptionState)
-{
- startPlaying(false, 0, 0, buffer() ? buffer()->duration() : 0, exceptionState);
-}
-
void AudioBufferSourceNode::start(double when, ExceptionState& exceptionState)
{
- startPlaying(false, when, 0, buffer() ? buffer()->duration() : 0, exceptionState);
+ AudioScheduledSourceNode::start(when, exceptionState);
}
void AudioBufferSourceNode::start(double when, double grainOffset, ExceptionState& exceptionState)
{
- startPlaying(true, when, grainOffset, buffer() ? buffer()->duration() : 0, exceptionState);
+ start(when, grainOffset, buffer() ? buffer()->duration() : 0, exceptionState);
}
void AudioBufferSourceNode::start(double when, double grainOffset, double grainDuration, ExceptionState& exceptionState)
{
- startPlaying(true, when, grainOffset, grainDuration, exceptionState);
-}
-
-void AudioBufferSourceNode::startPlaying(bool isGrain, double when, double grainOffset, double grainDuration, ExceptionState& exceptionState)
-{
ASSERT(isMainThread());
if (m_playbackState != UNSCHEDULED_STATE) {
@@ -416,22 +388,20 @@ void AudioBufferSourceNode::startPlaying(bool isGrain, double when, double grain
if (!buffer())
return;
- if (isGrain) {
- // Do sanity checking of grain parameters versus buffer size.
- double bufferDuration = buffer()->duration();
+ // Do sanity checking of grain parameters versus buffer size.
+ double bufferDuration = buffer()->duration();
- grainOffset = max(0.0, grainOffset);
- grainOffset = min(bufferDuration, grainOffset);
- m_grainOffset = grainOffset;
+ grainOffset = max(0.0, grainOffset);
+ grainOffset = min(bufferDuration, grainOffset);
+ m_grainOffset = grainOffset;
- double maxDuration = bufferDuration - grainOffset;
+ double maxDuration = bufferDuration - grainOffset;
- grainDuration = max(0.0, grainDuration);
- grainDuration = min(maxDuration, grainDuration);
- m_grainDuration = grainDuration;
- }
+ grainDuration = max(0.0, grainDuration);
+ grainDuration = min(maxDuration, grainDuration);
+ m_grainDuration = grainDuration;
- m_isGrain = isGrain;
+ m_isGrain = true;
m_startTime = when;
// We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
@@ -443,14 +413,6 @@ void AudioBufferSourceNode::startPlaying(bool isGrain, double when, double grain
m_playbackState = SCHEDULED_STATE;
}
-void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionState& exceptionState)
-{
- // Handle unspecified duration where 0 means the rest of the buffer.
- if (!grainDuration && buffer())
- grainDuration = buffer()->duration();
- startPlaying(true, when, grainOffset, grainDuration, exceptionState);
-}
-
double AudioBufferSourceNode::totalPitchRate()
{
double dopplerRate = 1.0;
@@ -513,6 +475,13 @@ void AudioBufferSourceNode::finish()
AudioScheduledSourceNode::finish();
}
+void AudioBufferSourceNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_buffer);
+ visitor->trace(m_playbackRate);
+ AudioScheduledSourceNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.h
index 9eafdb201b4..cb24a38c09a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.h
@@ -42,15 +42,14 @@ class AudioContext;
// AudioBufferSourceNode is an AudioNode representing an audio source from an in-memory audio asset represented by an AudioBuffer.
// It generally will be used for short sounds which require a high degree of scheduling flexibility (can playback in rhythmically perfect ways).
-class AudioBufferSourceNode : public AudioScheduledSourceNode {
+class AudioBufferSourceNode FINAL : public AudioScheduledSourceNode {
public:
- static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
+ static PassRefPtrWillBeRawPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
virtual ~AudioBufferSourceNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// setBuffer() is called on the main thread. This is the buffer we use for playback.
void setBuffer(AudioBuffer*, ExceptionState&);
@@ -61,13 +60,11 @@ public:
unsigned numberOfChannels();
// Play-state
- void start(ExceptionState&);
+ void start(ExceptionState& exceptionState) { start(0, exceptionState); }
void start(double when, ExceptionState&);
void start(double when, double grainOffset, ExceptionState&);
void start(double when, double grainOffset, double grainDuration, ExceptionState&);
- void noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionState&);
-
// Note: the attribute was originally exposed as .looping, but to be more consistent in naming with <audio>
// and with how it's described in the specification, the proper attribute name is .loop
// The old attribute is kept for backwards compatibility.
@@ -80,7 +77,6 @@ public:
void setLoopStart(double loopStart) { m_loopStart = loopStart; }
void setLoopEnd(double loopEnd) { m_loopEnd = loopEnd; }
- AudioParam* gain() { return m_gain.get(); }
AudioParam* playbackRate() { return m_playbackRate.get(); }
// If a panner node is set, then we can incorporate doppler shift into the playback pitch rate.
@@ -88,16 +84,16 @@ public:
void clearPannerNode();
// If we are no longer playing, propogate silence ahead to downstream nodes.
- virtual bool propagatesSilence() const;
+ virtual bool propagatesSilence() const OVERRIDE;
// AudioScheduledSourceNode
virtual void finish() OVERRIDE;
+ virtual void trace(Visitor*) OVERRIDE;
+
private:
AudioBufferSourceNode(AudioContext*, float sampleRate);
- void startPlaying(bool isGrain, double when, double grainOffset, double grainDuration, ExceptionState&);
-
// Returns true on success.
bool renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);
@@ -105,15 +101,14 @@ private:
inline bool renderSilenceAndFinishIfNotLooping(AudioBus*, unsigned index, size_t framesToProcess);
// m_buffer holds the sample data which this node outputs.
- RefPtr<AudioBuffer> m_buffer;
+ RefPtrWillBeMember<AudioBuffer> m_buffer;
// Pointers for the buffer and destination.
OwnPtr<const float*[]> m_sourceChannels;
OwnPtr<float*[]> m_destinationChannels;
- // Used for the "gain" and "playbackRate" attributes.
- RefPtr<AudioParam> m_gain;
- RefPtr<AudioParam> m_playbackRate;
+ // Used for the "playbackRate" attributes.
+ RefPtrWillBeMember<AudioParam> m_playbackRate;
// If m_isLooping is false, then this node will be done playing and become inactive after it reaches the end of the sample data in the buffer.
// If true, it will wrap around to the start of the buffer each time it reaches the end.
@@ -135,9 +130,6 @@ private:
// It incorporates the base pitch rate, any sample-rate conversion factor from the buffer, and any doppler shift from an associated panner node.
double totalPitchRate();
- // m_lastGain provides continuity when we dynamically adjust the gain.
- float m_lastGain;
-
// We optionally keep track of a panner node which has a doppler shift that is incorporated into
// the pitch rate. We manually manage ref-counting because we want to use RefTypeConnection.
PannerNode* m_pannerNode;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.idl
index ab58774ead6..135f1ce1be2 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioBufferSourceNode.idl
@@ -24,33 +24,19 @@
// A cached (non-streamed), memory-resident audio source
[
- Conditional=WEB_AUDIO
+ Conditional=WEB_AUDIO,
+ TypeChecking=Interface|Nullable,
] interface AudioBufferSourceNode : AudioSourceNode {
- [StrictTypeChecking, RaisesException=Setter] attribute AudioBuffer buffer;
+ [RaisesException=Setter] attribute AudioBuffer buffer;
- const unsigned short UNSCHEDULED_STATE = 0;
- const unsigned short SCHEDULED_STATE = 1;
- const unsigned short PLAYING_STATE = 2;
- const unsigned short FINISHED_STATE = 3;
-
- readonly attribute unsigned short playbackState;
-
- readonly attribute AudioParam gain;
readonly attribute AudioParam playbackRate;
- attribute boolean loop; // This is the proper attribute name from the specification.
-
+ attribute boolean loop;
attribute double loopStart;
attribute double loopEnd;
- [RaisesException, MeasureAs=WebAudioStart] void start(optional double when, optional double grainOffset, optional double grainDuration);
+ [RaisesException] void start(optional double when, optional double grainOffset, optional double grainDuration);
[RaisesException] void stop(optional double when);
- [DeprecateAs=WebAudioLooping, ImplementedAs=loop] attribute boolean looping; // This is an alias for the .loop attribute for backwards compatibility.
-
- [RaisesException, ImplementedAs=start, MeasureAs=LegacyWebAudio] void noteOn(double when);
- [RaisesException, MeasureAs=LegacyWebAudio] void noteGrainOn(double when, double grainOffset, double grainDuration);
- [RaisesException, ImplementedAs=stop] void noteOff(double when);
-
attribute EventHandler onended;
};
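The removed IDL members above retire the prefixed-era playback API: noteOn(), noteGrainOn(), noteOff(), playbackState and its constants, the gain AudioParam, and the legacy .looping alias. A hedged TypeScript sketch of the surviving surface, with illustrative names:

    declare const ctx: AudioContext;
    declare const buffer: AudioBuffer;

    const src = ctx.createBufferSource();
    src.buffer = buffer; // nullable and type-checked via the IDL attributes
    src.loop = true;     // .looping is removed; .loop is the only spelling
    src.loopStart = 0.5;
    src.loopEnd = 1.5;
    src.connect(ctx.destination);

    // start(when, offset, duration) subsumes noteOn()/noteGrainOn();
    // stop(when) subsumes noteOff().
    src.start(0, 0.5, 1.0);
    src.onended = () => console.log('done');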
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
index 5ae5ffe99f1..a77c9988d43 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
@@ -86,91 +86,67 @@ bool AudioContext::isSampleRateRangeGood(float sampleRate)
}
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
-const unsigned MaxHardwareContexts = 4;
+const unsigned MaxHardwareContexts = 6;
unsigned AudioContext::s_hardwareContextCount = 0;
-PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (s_hardwareContextCount >= MaxHardwareContexts) {
exceptionState.throwDOMException(
SyntaxError,
"number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
- return 0;
+ return nullptr;
}
- RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
+ RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
audioContext->suspendIfNeeded();
return audioContext.release();
}
-PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
-{
- document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
- return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState);
-}
-
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
+ , m_isCleared(false)
, m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
+ , m_destinationNode(nullptr)
, m_isDeletionScheduled(false)
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(false)
- , m_activeSourceCount(0)
{
- constructCommon();
+ ScriptWrappable::init(this);
m_destinationNode = DefaultAudioDestinationNode::create(this);
- // This sets in motion an asynchronous loading mechanism on another thread.
- // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
- // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
- // when this has finished (see AudioDestinationNode).
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
+ initialize();
}
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
+ , m_isCleared(false)
, m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
+ , m_destinationNode(nullptr)
+ , m_isDeletionScheduled(false)
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
- , m_activeSourceCount(0)
{
- constructCommon();
-
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
+ ScriptWrappable::init(this);
// Create a new destination for offline rendering.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- ASSERT(m_renderTarget);
- m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
- ASSERT(m_destinationNode);
-}
+ if (m_renderTarget.get())
+ m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
-void AudioContext::constructCommon()
-{
- ScriptWrappable::init(this);
- // According to spec AudioContext must die only after page navigate.
- // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
- setPendingActivity(this);
-
- FFTFrame::initialize();
-
- m_listener = AudioListener::create();
+ initialize();
}
AudioContext::~AudioContext()
@@ -180,7 +156,6 @@ AudioContext::~AudioContext()
#endif
// AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
ASSERT(!m_isInitialized);
- ASSERT(m_isStopScheduled);
ASSERT(!m_nodesToDelete.size());
ASSERT(!m_referencedNodes.size());
ASSERT(!m_finishedNodes.size());
@@ -190,27 +165,27 @@ AudioContext::~AudioContext()
ASSERT(!m_renderingAutomaticPullNodes.size());
}
-void AudioContext::lazyInitialize()
+void AudioContext::initialize()
{
- if (!m_isInitialized) {
- // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
- ASSERT(!m_isAudioThreadFinished);
- if (!m_isAudioThreadFinished) {
- if (m_destinationNode.get()) {
- m_destinationNode->initialize();
+ if (isInitialized())
+ return;
+
+ FFTFrame::initialize();
+ m_listener = AudioListener::create();
- if (!isOfflineContext()) {
- // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
- // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
- // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
- // We may want to consider requiring it for symmetry with OfflineAudioContext.
- m_destinationNode->startRendering();
- ++s_hardwareContextCount;
- }
+ if (m_destinationNode.get()) {
+ m_destinationNode->initialize();
- }
- m_isInitialized = true;
+ if (!isOfflineContext()) {
+ // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+ // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+ // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+ // We may want to consider requiring it for symmetry with OfflineAudioContext.
+ m_destinationNode->startRendering();
+ ++s_hardwareContextCount;
}
+
+ m_isInitialized = true;
}
}
@@ -222,28 +197,24 @@ void AudioContext::clear()
// Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
do {
- deleteMarkedNodes();
- m_nodesToDelete.append(m_nodesMarkedForDeletion);
+ m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
+ deleteMarkedNodes();
} while (m_nodesToDelete.size());
- // It was set in constructCommon.
- unsetPendingActivity(this);
+ m_isCleared = true;
}
void AudioContext::uninitialize()
{
ASSERT(isMainThread());
- if (!m_isInitialized)
+ if (!isInitialized())
return;
// This stops the audio thread and all audio rendering.
m_destinationNode->uninitialize();
- // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
- m_isAudioThreadFinished = true;
-
if (!isOfflineContext()) {
ASSERT(s_hardwareContextCount);
--s_hardwareContextCount;
@@ -255,20 +226,6 @@ void AudioContext::uninitialize()
m_isInitialized = false;
}
-bool AudioContext::isInitialized() const
-{
- return m_isInitialized;
-}
-
-bool AudioContext::isRunnable() const
-{
- if (!isInitialized())
- return false;
-
- // Check with the HRTF spatialization system to see if it's finished loading.
- return m_hrtfDatabaseLoader->isLoaded();
-}
-
void AudioContext::stopDispatch(void* userData)
{
AudioContext* context = reinterpret_cast<AudioContext*>(userData);
@@ -294,55 +251,15 @@ void AudioContext::stop()
callOnMainThread(stopDispatch, this);
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+bool AudioContext::hasPendingActivity() const
{
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- if (!audioBuffer.get()) {
- if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
- exceptionState.throwDOMException(
- NotSupportedError,
- "requested number of channels (" + String::number(numberOfChannels) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()) + ")");
- } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) {
- exceptionState.throwDOMException(
- NotSupportedError,
- "requested sample rate (" + String::number(sampleRate)
- + ") does not lie in the allowed range of "
- + String::number(AudioBuffer::minAllowedSampleRate())
- + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz");
- } else if (!numberOfFrames) {
- exceptionState.throwDOMException(
- NotSupportedError,
- "number of frames must be greater than 0.");
- } else {
- exceptionState.throwDOMException(
- NotSupportedError,
- "unable to create buffer of " + String::number(numberOfChannels)
- + " channel(s) of " + String::number(numberOfFrames)
- + " frames each.");
- }
- return 0;
- }
-
- return audioBuffer;
+ // According to spec AudioContext must die only after page navigates.
+ return !m_isCleared;
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
- ASSERT(arrayBuffer);
- if (!arrayBuffer) {
- exceptionState.throwDOMException(
- SyntaxError,
- "invalid ArrayBuffer.");
- return 0;
- }
-
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
- if (!audioBuffer.get()) {
- exceptionState.throwDOMException(
- SyntaxError,
- "invalid audio data in ArrayBuffer.");
- return 0;
- }
+ RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
return audioBuffer;
}
@@ -358,11 +275,10 @@ void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBuffe
m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
-PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+ RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
@@ -371,27 +287,25 @@ PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
return node;
}
-PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
{
+ ASSERT(isMainThread());
if (!mediaElement) {
exceptionState.throwDOMException(
InvalidStateError,
"invalid HTMLMedialElement.");
- return 0;
+ return nullptr;
}
- ASSERT(isMainThread());
- lazyInitialize();
-
// First check if this media element already has a source node.
if (mediaElement->audioSourceNode()) {
exceptionState.throwDOMException(
InvalidStateError,
"invalid HTMLMediaElement.");
- return 0;
+ return nullptr;
}
- RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+ RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
mediaElement->setAudioSourceNode(node.get());
@@ -399,33 +313,28 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
return node;
}
-PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
+ ASSERT(isMainThread());
if (!mediaStream) {
exceptionState.throwDOMException(
InvalidStateError,
"invalid MediaStream source");
- return 0;
+ return nullptr;
}
- ASSERT(isMainThread());
- lazyInitialize();
-
- AudioSourceProvider* provider = 0;
-
MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
- RefPtr<MediaStreamTrack> audioTrack;
-
- // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
- for (size_t i = 0; i < audioTracks.size(); ++i) {
- audioTrack = audioTracks[i];
- if (audioTrack->component()->audioSourceProvider()) {
- provider = audioTrack->component()->audioSourceProvider();
- break;
- }
+ if (audioTracks.isEmpty()) {
+ exceptionState.throwDOMException(
+ InvalidStateError,
+ "MediaStream has no audio track");
+ return nullptr;
}
- RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);
+ // Use the first audio track in the media stream.
+ RefPtrWillBeRawPtr<MediaStreamTrack> audioTrack = audioTracks[0];
+ OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
+ RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
// FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
node->setFormat(2, sampleRate());
@@ -434,36 +343,34 @@ PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(Med
return node;
}
-PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
- // FIXME: Add support for an optional argument which specifies the number of channels.
- // FIXME: The default should probably be stereo instead of mono.
- return MediaStreamAudioDestinationNode::create(this, 1);
+ // Set number of output channels to stereo by default.
+ return MediaStreamAudioDestinationNode::create(this, 2);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
return createScriptProcessor(0, 2, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
// Set number of output channels to stereo by default.
return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
if (!node.get()) {
if (!numberOfInputChannels && !numberOfOutputChannels) {
@@ -488,90 +395,81 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
"buffer size (" + String::number(bufferSize)
+ ") must be a power of two between 256 and 16384.");
}
- return 0;
+ return nullptr;
}
refNode(node.get()); // The context keeps a reference until we stop making JavaScript rendering callbacks.
return node;
}
-PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
ASSERT(isMainThread());
- lazyInitialize();
return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
+PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
ASSERT(isMainThread());
- lazyInitialize();
return WaveShaperNode::create(this);
}
-PassRefPtr<PannerNode> AudioContext::createPanner()
+PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
{
ASSERT(isMainThread());
- lazyInitialize();
return PannerNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<ConvolverNode> AudioContext::createConvolver()
+PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
{
ASSERT(isMainThread());
- lazyInitialize();
return ConvolverNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
ASSERT(isMainThread());
- lazyInitialize();
return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
+PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
{
ASSERT(isMainThread());
- lazyInitialize();
return AnalyserNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<GainNode> AudioContext::createGain()
+PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
{
ASSERT(isMainThread());
- lazyInitialize();
return GainNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
{
const double defaultMaxDelayTime = 1;
return createDelay(defaultMaxDelayTime, exceptionState);
}
-PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
+ RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
if (exceptionState.hadException())
- return 0;
+ return nullptr;
return node;
}
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
{
const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
}
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
+ RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
if (!node.get()) {
exceptionState.throwDOMException(
@@ -579,24 +477,23 @@ PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe
"number of outputs (" + String::number(numberOfOutputs)
+ ") must be between 1 and "
+ String::number(AudioContext::maxNumberOfChannels()) + ".");
- return 0;
+ return nullptr;
}
return node;
}
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
{
const unsigned ChannelMergerDefaultNumberOfInputs = 6;
return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
}
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
+ RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
if (!node.get()) {
exceptionState.throwDOMException(
@@ -604,18 +501,17 @@ PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI
"number of inputs (" + String::number(numberOfInputs)
+ ") must be between 1 and "
+ String::number(AudioContext::maxNumberOfChannels()) + ".");
- return 0;
+ return nullptr;
}
return node;
}
-PassRefPtr<OscillatorNode> AudioContext::createOscillator()
+PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+ RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
@@ -624,7 +520,7 @@ PassRefPtr<OscillatorNode> AudioContext::createOscillator()
return node;
}
-PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -632,14 +528,14 @@ PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl
exceptionState.throwDOMException(
SyntaxError,
"invalid real array");
- return 0;
+ return nullptr;
}
if (!imag) {
exceptionState.throwDOMException(
SyntaxError,
"invalid imaginary array");
- return 0;
+ return nullptr;
}
if (real->length() != imag->length()) {
@@ -648,7 +544,7 @@ PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl
"length of real array (" + String::number(real->length())
+ ") and length of imaginary array (" + String::number(imag->length())
+ ") must match.");
- return 0;
+ return nullptr;
}
if (real->length() > 4096) {
@@ -656,7 +552,7 @@ PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl
IndexSizeError,
"length of real array (" + String::number(real->length())
+ ") exceeds allowed maximum of 4096");
- return 0;
+ return nullptr;
}
if (imag->length() > 4096) {
@@ -664,10 +560,9 @@ PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Fl
IndexSizeError,
"length of imaginary array (" + String::number(imag->length())
+ ") exceeds allowed maximum of 4096");
- return 0;
+ return nullptr;
}
- lazyInitialize();
return PeriodicWave::create(sampleRate(), real, imag);
}
@@ -680,7 +575,7 @@ void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
void AudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
- ASSERT(isAudioThread() || isAudioThreadFinished());
+ ASSERT(isAudioThread());
for (unsigned i = 0; i < m_finishedNodes.size(); i++)
derefNode(m_finishedNodes[i]);
@@ -712,7 +607,7 @@ void AudioContext::derefNode(AudioNode* node)
void AudioContext::derefUnfinishedSourceNodes()
{
- ASSERT(isMainThread() && isAudioThreadFinished());
+ ASSERT(isMainThread());
for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
@@ -743,7 +638,7 @@ bool AudioContext::tryLock(bool& mustReleaseLock)
bool isAudioThread = thisThread == audioThread();
// Try to catch cases of using tryLock() on the main thread - it should use a regular lock.
- ASSERT(isAudioThread || isAudioThreadFinished());
+ ASSERT(isAudioThread);
if (!isAudioThread) {
// In a release build, treat tryLock() as lock() (since the ASSERT(isAudioThread) above never fires) - this is the best we can do.
@@ -858,7 +753,7 @@ void AudioContext::markForDeletion(AudioNode* node)
{
ASSERT(isGraphOwner());
- if (isAudioThreadFinished())
+ if (!isInitialized())
m_nodesToDelete.append(node);
else
m_nodesMarkedForDeletion.append(node);
@@ -872,14 +767,14 @@ void AudioContext::markForDeletion(AudioNode* node)
void AudioContext::scheduleNodeDeletion()
{
- bool isGood = m_isInitialized && isGraphOwner();
+ bool isGood = isInitialized() && isGraphOwner();
ASSERT(isGood);
if (!isGood)
return;
// Make sure to call deleteMarkedNodes() on main thread.
if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
- m_nodesToDelete.append(m_nodesMarkedForDeletion);
+ m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
m_isDeletionScheduled = true;
@@ -907,7 +802,7 @@ void AudioContext::deleteMarkedNodes()
ASSERT(isMainThread());
// Protect this object from being deleted before we release the mutex locked by AutoLocker.
- RefPtr<AudioContext> protect(this);
+ RefPtrWillBeRawPtr<AudioContext> protect(this);
{
AutoLocker locker(this);
@@ -924,9 +819,14 @@ void AudioContext::deleteMarkedNodes()
unsigned numberOfOutputs = node->numberOfOutputs();
for (unsigned i = 0; i < numberOfOutputs; ++i)
m_dirtyAudioNodeOutputs.remove(node->output(i));
-
+#if ENABLE(OILPAN)
+ // Finally, clear the keep alive handle that keeps this
+ // object from being collected.
+ node->clearKeepAlive();
+#else
// Finally, delete it.
delete node;
+#endif
}
m_isDeletionScheduled = false;
}
@@ -955,7 +855,7 @@ void AudioContext::handleDirtyAudioSummingJunctions()
{
ASSERT(isGraphOwner());
- for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
+ for (HashSet<AudioSummingJunction* >::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
(*i)->updateRenderingState();
m_dirtySummingJunctions.clear();
@@ -1051,14 +951,13 @@ void AudioContext::fireCompletionEvent()
}
}
-void AudioContext::incrementActiveSourceCount()
-{
- atomicIncrement(&m_activeSourceCount);
-}
-
-void AudioContext::decrementActiveSourceCount()
+void AudioContext::trace(Visitor* visitor)
{
- atomicDecrement(&m_activeSourceCount);
+ visitor->trace(m_renderTarget);
+ visitor->trace(m_destinationNode);
+ visitor->trace(m_listener);
+ visitor->trace(m_dirtySummingJunctions);
+ EventTargetWithInlineData::trace(visitor);
}
} // namespace WebCore
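
A note on the PassRefPtrWillBeRawPtr / RefPtrWillBeRawPtr / adoptRefWillBeNoop spellings that dominate this file: they are Blink's Oilpan transition helpers, which resolve to ordinary WTF reference counting in a non-Oilpan build and to GC-owned pointers once Oilpan is enabled. The sketch below models the idea only; the real definitions live in platform/heap/Handle.h, and std::shared_ptr merely stands in here for WTF::RefPtr.

    #include <iostream>
    #include <memory>

    #define ENABLE_OILPAN 0 // flip to 1 to model the garbage-collected build

    #if ENABLE_OILPAN
    // Under Oilpan the GC owns the object, so a "reference" is just a raw
    // pointer the heap traces, and adopting a fresh object is a no-op.
    template <typename T> using RefPtrWillBeRawPtr = T*;
    template <typename T> T* adoptRefWillBeNoop(T* p) { return p; }
    #else
    // Without Oilpan the same names fall back to plain reference counting.
    template <typename T> using RefPtrWillBeRawPtr = std::shared_ptr<T>;
    template <typename T> std::shared_ptr<T> adoptRefWillBeNoop(T* p) { return std::shared_ptr<T>(p); }
    #endif

    struct GainNode { float gain = 1.0f; };

    int main()
    {
        // Identical call site in both builds (the toy OILPAN=1 mode leaks,
        // since there is no collector here to reclaim the node).
        RefPtrWillBeRawPtr<GainNode> node = adoptRefWillBeNoop(new GainNode());
        std::cout << node->gain << "\n";
    }

Because call sites compile identically in both modes, factory methods such as createGain() can be migrated one signature at a time.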
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.h
index a6ece93825d..d6c9f8b94dd 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.h
@@ -28,11 +28,11 @@
#include "bindings/v8/ScriptWrappable.h"
#include "core/dom/ActiveDOMObject.h"
#include "core/events/EventListener.h"
-#include "core/events/EventTarget.h"
-#include "platform/audio/AudioBus.h"
-#include "platform/audio/HRTFDatabaseLoader.h"
+#include "modules/EventTargetModules.h"
#include "modules/webaudio/AsyncAudioDecoder.h"
#include "modules/webaudio/AudioDestinationNode.h"
+#include "platform/audio/AudioBus.h"
+#include "platform/heap/Handle.h"
#include "wtf/HashSet.h"
#include "wtf/MainThread.h"
#include "wtf/OwnPtr.h"
@@ -74,43 +74,30 @@ class WaveShaperNode;
// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
-class AudioContext : public ActiveDOMObject, public ScriptWrappable, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData {
- DEFINE_EVENT_TARGET_REFCOUNTING(ThreadSafeRefCounted<AudioContext>);
+class AudioContext : public ThreadSafeRefCountedWillBeThreadSafeRefCountedGarbageCollected<AudioContext>, public ActiveDOMObject, public ScriptWrappable, public EventTargetWithInlineData {
+ DEFINE_EVENT_TARGET_REFCOUNTING(ThreadSafeRefCountedWillBeThreadSafeRefCountedGarbageCollected<AudioContext>);
+ WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioContext);
public:
// Create an AudioContext for rendering to the audio hardware.
- static PassRefPtr<AudioContext> create(Document&, ExceptionState&);
-
- // Deprecated: create an AudioContext for offline (non-realtime) rendering.
- static PassRefPtr<AudioContext> create(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
+ static PassRefPtrWillBeRawPtr<AudioContext> create(Document&, ExceptionState&);
virtual ~AudioContext();
- bool isInitialized() const;
+ virtual void trace(Visitor*) OVERRIDE;
+ bool isInitialized() const { return m_isInitialized; }
bool isOfflineContext() { return m_isOfflineContext; }
- // Returns true when initialize() was called AND all asynchronous initialization has completed.
- bool isRunnable() const;
-
- HRTFDatabaseLoader* hrtfDatabaseLoader() const { return m_hrtfDatabaseLoader.get(); }
-
// Document notification
- virtual void stop();
-
- Document* document() const; // ASSERTs if document no longer exists.
- bool hasDocument();
+ virtual void stop() OVERRIDE FINAL;
+ virtual bool hasPendingActivity() const OVERRIDE;
AudioDestinationNode* destination() { return m_destinationNode.get(); }
size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
double currentTime() const { return m_destinationNode->currentTime(); }
float sampleRate() const { return m_destinationNode->sampleRate(); }
- unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
- void incrementActiveSourceCount();
- void decrementActiveSourceCount();
-
- PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
- PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionState&);
+ PassRefPtrWillBeRawPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
// Asynchronous audio file data decoding.
void decodeAudioData(ArrayBuffer*, PassOwnPtr<AudioBufferCallback>, PassOwnPtr<AudioBufferCallback>, ExceptionState&);
@@ -118,29 +105,29 @@ public:
AudioListener* listener() { return m_listener.get(); }
// The AudioNode create methods are called on the main thread (from JavaScript).
- PassRefPtr<AudioBufferSourceNode> createBufferSource();
- PassRefPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionState&);
- PassRefPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionState&);
- PassRefPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination();
- PassRefPtr<GainNode> createGain();
- PassRefPtr<BiquadFilterNode> createBiquadFilter();
- PassRefPtr<WaveShaperNode> createWaveShaper();
- PassRefPtr<DelayNode> createDelay(ExceptionState&);
- PassRefPtr<DelayNode> createDelay(double maxDelayTime, ExceptionState&);
- PassRefPtr<PannerNode> createPanner();
- PassRefPtr<ConvolverNode> createConvolver();
- PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
- PassRefPtr<AnalyserNode> createAnalyser();
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(ExceptionState&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionState&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(ExceptionState&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionState&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionState&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionState&);
- PassRefPtr<OscillatorNode> createOscillator();
- PassRefPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&);
+ PassRefPtrWillBeRawPtr<AudioBufferSourceNode> createBufferSource();
+ PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionState&);
+ PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionState&);
+ PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination();
+ PassRefPtrWillBeRawPtr<GainNode> createGain();
+ PassRefPtrWillBeRawPtr<BiquadFilterNode> createBiquadFilter();
+ PassRefPtrWillBeRawPtr<WaveShaperNode> createWaveShaper();
+ PassRefPtrWillBeRawPtr<DelayNode> createDelay(ExceptionState&);
+ PassRefPtrWillBeRawPtr<DelayNode> createDelay(double maxDelayTime, ExceptionState&);
+ PassRefPtrWillBeRawPtr<PannerNode> createPanner();
+ PassRefPtrWillBeRawPtr<ConvolverNode> createConvolver();
+ PassRefPtrWillBeRawPtr<DynamicsCompressorNode> createDynamicsCompressor();
+ PassRefPtrWillBeRawPtr<AnalyserNode> createAnalyser();
+ PassRefPtrWillBeRawPtr<ScriptProcessorNode> createScriptProcessor(ExceptionState&);
+ PassRefPtrWillBeRawPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionState&);
+ PassRefPtrWillBeRawPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState&);
+ PassRefPtrWillBeRawPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState&);
+ PassRefPtrWillBeRawPtr<ChannelSplitterNode> createChannelSplitter(ExceptionState&);
+ PassRefPtrWillBeRawPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionState&);
+ PassRefPtrWillBeRawPtr<ChannelMergerNode> createChannelMerger(ExceptionState&);
+ PassRefPtrWillBeRawPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionState&);
+ PassRefPtrWillBeRawPtr<OscillatorNode> createOscillator();
+ PassRefPtrWillBeRawPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&);
// When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
void notifyNodeFinishedProcessing(AudioNode*);
@@ -183,9 +170,6 @@ public:
ThreadIdentifier audioThread() const { return m_audioThread; }
bool isAudioThread() const;
- // Returns true only after the audio thread has been started and then shutdown.
- bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
-
// mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
void lock(bool& mustReleaseLock);
@@ -234,8 +218,8 @@ public:
void removeMarkedSummingJunction(AudioSummingJunction*);
// EventTarget
- virtual const AtomicString& interfaceName() const OVERRIDE;
- virtual ExecutionContext* executionContext() const OVERRIDE;
+ virtual const AtomicString& interfaceName() const OVERRIDE FINAL;
+ virtual ExecutionContext* executionContext() const OVERRIDE FINAL;
DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
@@ -251,22 +235,21 @@ protected:
static bool isSampleRateRangeGood(float sampleRate);
private:
- void constructCommon();
-
- void lazyInitialize();
+ void initialize();
void uninitialize();
// ExecutionContext calls stop twice.
// We'd like to schedule only one stop action for them.
bool m_isStopScheduled;
static void stopDispatch(void* userData);
+ bool m_isCleared;
void clear();
void scheduleNodeDeletion();
static void deleteMarkedNodesDispatch(void* userData);
+ // Set to true when the destination node has been initialized and is ready to process data.
bool m_isInitialized;
- bool m_isAudioThreadFinished;
// The context itself keeps a reference to all source nodes. The source nodes, then reference all nodes they're connected to.
// In turn, these nodes reference all nodes they're connected to. All nodes are ultimately connected to the AudioDestinationNode.
@@ -279,8 +262,8 @@ private:
// Make sure to dereference them here.
void derefUnfinishedSourceNodes();
- RefPtr<AudioDestinationNode> m_destinationNode;
- RefPtr<AudioListener> m_listener;
+ RefPtrWillBeMember<AudioDestinationNode> m_destinationNode;
+ RefPtrWillBeMember<AudioListener> m_listener;
// Only accessed in the audio thread.
Vector<AudioNode*> m_finishedNodes;
@@ -301,7 +284,7 @@ private:
bool m_isDeletionScheduled;
// Only accessed when the graph lock is held.
- HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
+ HashSet<AudioSummingJunction* > m_dirtySummingJunctions;
HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
void handleDirtyAudioSummingJunctions();
void handleDirtyAudioNodeOutputs();
@@ -324,10 +307,7 @@ private:
// Only accessed in the audio thread.
Vector<AudioNode*> m_deferredFinishDerefList;
- // HRTF Database loader
- RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
-
- RefPtr<AudioBuffer> m_renderTarget;
+ RefPtrWillBeMember<AudioBuffer> m_renderTarget;
bool m_isOfflineContext;
@@ -336,9 +316,6 @@ private:
// This assumes 32 channels is large enough for multi-channel audio.
// The limit is somewhat arbitrary and could be increased if necessary.
enum { MaxNumberOfChannels = 32 };
-
- // Number of AudioBufferSourceNodes that are active (playing).
- int m_activeSourceCount;
};
} // WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.idl
index 52da1a57277..f5a932a181c 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioContext.idl
@@ -24,19 +24,20 @@
*/
[
+ WillBeGarbageCollected,
ActiveDOMObject,
Conditional=WEB_AUDIO,
Constructor,
- Constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate),
ConstructorCallWith=Document,
NoInterfaceObject,
RaisesException=Constructor,
] interface AudioContext : EventTarget {
+
// All rendered audio ultimately connects to destination, which represents the audio hardware.
readonly attribute AudioDestinationNode destination;
// All scheduled times are relative to this time in seconds.
- readonly attribute float currentTime;
+ readonly attribute double currentTime;
// All AudioNodes in the context run at this sample-rate (sample-frames per second).
readonly attribute float sampleRate;
@@ -44,11 +45,7 @@
// All panning is relative to this listener.
readonly attribute AudioListener listener;
- // Number of AudioBufferSourceNodes that are currently playing.
- readonly attribute unsigned long activeSourceCount;
-
[RaisesException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
- [RaisesException] AudioBuffer createBuffer(ArrayBuffer? buffer, boolean mixToMono);
// Asynchronous audio file data decoding.
[RaisesException] void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback successCallback, optional AudioBufferCallback errorCallback);
@@ -82,10 +79,4 @@
// void prepareOfflineBufferRendering(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
attribute EventHandler oncomplete;
void startRendering();
-
- [MeasureAs=LegacyWebAudio, ImplementedAs=createGain] GainNode createGainNode();
- [MeasureAs=LegacyWebAudio, ImplementedAs=createDelay, RaisesException] DelayNode createDelayNode(optional double maxDelayTime);
-
- [MeasureAs=LegacyWebAudio, ImplementedAs=createScriptProcessor, RaisesException] ScriptProcessorNode createJavaScriptNode(unsigned long bufferSize, optional unsigned long numberOfInputChannels, optional unsigned long numberOfOutputChannels);
-
};
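
One behavioral change in this IDL is easy to miss: currentTime widens from float to double. A float's 24-bit significand stops resolving individual sample frames only a few minutes into playback, which breaks sample-accurate scheduling against currentTime; double keeps frame accuracy for any realistic session length. A self-contained demonstration:

    #include <cstdio>

    int main()
    {
        const double sampleRate = 44100.0;
        const double oneFrame = 1.0 / sampleRate; // ~22.7 microseconds
        const double t = 9000.0;                  // 2.5 hours into a session

        float f0 = static_cast<float>(t);
        float f1 = static_cast<float>(t + oneFrame);

        std::printf("float step:  %g\n", f1 - f0);            // 0: frames collapse
        std::printf("double step: %g\n", (t + oneFrame) - t); // ~2.26757e-05
    }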
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
index 68a6d7285e1..807c1d7a8ff 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
@@ -60,7 +60,7 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
context()->setAudioThread(currentThread());
- if (!context()->isRunnable()) {
+ if (!context()->isInitialized()) {
destinationBus->zero();
return;
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
index 801d7fd656d..182648732f8 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
@@ -42,29 +42,23 @@ public:
virtual ~AudioDestinationNode();
// AudioNode
- virtual void process(size_t) { }; // we're pulled by hardware so this is never called
- virtual void reset() { m_currentSampleFrame = 0; };
+ virtual void process(size_t) OVERRIDE FINAL { } // we're pulled by hardware so this is never called
// The audio hardware calls render() to get the next render quantum of audio into destinationBus.
// It will optionally give us local/live audio input in sourceBus (if it's not 0).
- virtual void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames);
+ virtual void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames) OVERRIDE FINAL;
size_t currentSampleFrame() const { return m_currentSampleFrame; }
double currentTime() const { return currentSampleFrame() / static_cast<double>(sampleRate()); }
virtual unsigned long maxChannelCount() const { return 0; }
- // Enable local/live input for the specified device.
- virtual void enableInput(const String& inputDeviceId) = 0;
-
virtual void startRendering() = 0;
- AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
-
protected:
// LocalAudioInputProvider allows us to expose an AudioSourceProvider for local/live audio input.
// If there is local/live audio input, we call set() with the audio input data every render quantum.
- class LocalAudioInputProvider : public AudioSourceProvider {
+ class LocalAudioInputProvider FINAL : public AudioSourceProvider {
public:
LocalAudioInputProvider()
: m_sourceBus(AudioBus::create(2, AudioNode::ProcessingSizeInFrames)) // FIXME: handle non-stereo local input.
@@ -78,7 +72,7 @@ protected:
}
// AudioSourceProvider.
- virtual void provideInput(AudioBus* destinationBus, size_t numberOfFrames)
+ virtual void provideInput(AudioBus* destinationBus, size_t numberOfFrames) OVERRIDE
{
bool isGood = destinationBus && destinationBus->length() == numberOfFrames && m_sourceBus->length() == numberOfFrames;
ASSERT(isGood);
@@ -90,8 +84,8 @@ protected:
RefPtr<AudioBus> m_sourceBus;
};
- virtual double tailTime() const OVERRIDE { return 0; }
- virtual double latencyTime() const OVERRIDE { return 0; }
+ virtual double tailTime() const OVERRIDE FINAL { return 0; }
+ virtual double latencyTime() const OVERRIDE FINAL { return 0; }
// Counts the number of sample-frames processed by the destination.
size_t m_currentSampleFrame;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.cpp
index e13794597bb..94466c9a19f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.cpp
@@ -32,6 +32,7 @@
#include "modules/webaudio/AudioListener.h"
+#include "modules/webaudio/PannerNode.h"
#include "platform/audio/AudioBus.h"
namespace WebCore {
@@ -47,6 +48,101 @@ AudioListener::AudioListener()
ScriptWrappable::init(this);
}
+AudioListener::~AudioListener()
+{
+ m_panners.clear();
+}
+
+void AudioListener::addPanner(PannerNode* panner)
+{
+ if (!panner)
+ return;
+
+ m_panners.append(panner);
+}
+
+void AudioListener::removePanner(PannerNode* panner)
+{
+ for (unsigned i = 0; i < m_panners.size(); ++i) {
+ if (panner == m_panners[i]) {
+ m_panners.remove(i);
+ break;
+ }
+ }
+}
+
+void AudioListener::markPannersAsDirty(unsigned type)
+{
+ for (unsigned i = 0; i < m_panners.size(); ++i)
+ m_panners[i]->markPannerAsDirty(type);
+}
+
+void AudioListener::setPosition(const FloatPoint3D &position)
+{
+ if (m_position == position)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_position = position;
+ markPannersAsDirty(PannerNode::AzimuthElevationDirty | PannerNode::DistanceConeGainDirty | PannerNode::DopplerRateDirty);
+}
+
+void AudioListener::setOrientation(const FloatPoint3D &orientation)
+{
+ if (m_orientation == orientation)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_orientation = orientation;
+ markPannersAsDirty(PannerNode::AzimuthElevationDirty);
+}
+
+void AudioListener::setUpVector(const FloatPoint3D &upVector)
+{
+ if (m_upVector == upVector)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_upVector = upVector;
+ markPannersAsDirty(PannerNode::AzimuthElevationDirty);
+}
+
+void AudioListener::setVelocity(const FloatPoint3D &velocity)
+{
+ if (m_velocity == velocity)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_velocity = velocity;
+ markPannersAsDirty(PannerNode::DopplerRateDirty);
+}
+
+void AudioListener::setDopplerFactor(double dopplerFactor)
+{
+ if (m_dopplerFactor == dopplerFactor)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_dopplerFactor = dopplerFactor;
+ markPannersAsDirty(PannerNode::DopplerRateDirty);
+}
+
+void AudioListener::setSpeedOfSound(double speedOfSound)
+{
+ if (m_speedOfSound == speedOfSound)
+ return;
+
+ // This synchronizes with panner's process().
+ MutexLocker listenerLocker(m_listenerLock);
+ m_speedOfSound = speedOfSound;
+ markPannersAsDirty(PannerNode::DopplerRateDirty);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
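
Every setter above follows the same shape: early-out if the value is unchanged, take the listener lock that the panners' process() also holds, mutate the state, then mark dependent panners dirty so they lazily recompute azimuth/elevation or doppler rate on the next render quantum. A condensed standalone sketch of that pattern; all names are illustrative, not Blink's:

    #include <mutex>
    #include <vector>

    struct Panner {
        unsigned dirtyBits = 0;
        void markDirty(unsigned bits) { dirtyBits |= bits; }
    };

    class Listener {
    public:
        enum { AzimuthElevationDirty = 1, DopplerRateDirty = 2 };

        void setPositionX(float x)
        {
            if (m_x == x)
                return; // unchanged: skip the lock and the dirtying

            // Synchronizes with the audio thread reading listener state.
            std::lock_guard<std::mutex> locker(m_lock);
            m_x = x;
            markPannersDirty(AzimuthElevationDirty | DopplerRateDirty);
        }

        void addPanner(Panner* panner) { m_panners.push_back(panner); }

    private:
        void markPannersDirty(unsigned bits)
        {
            for (Panner* panner : m_panners)
                panner->markDirty(bits);
        }

        std::mutex m_lock;
        float m_x = 0;
        std::vector<Panner*> m_panners;
    };

    int main()
    {
        Listener listener;
        Panner panner;
        listener.addPanner(&panner);
        listener.setPositionX(1.0f); // panner.dirtyBits now has both bits set
    }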
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.h
index dcd68dd42ba..214db390b70 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.h
@@ -31,63 +31,78 @@
#include "bindings/v8/ScriptWrappable.h"
#include "platform/geometry/FloatPoint3D.h"
+#include "platform/heap/Handle.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefCounted.h"
+#include "wtf/Vector.h"
namespace WebCore {
+class PannerNode;
+
// AudioListener maintains the state of the listener in the audio scene as defined in the OpenAL specification.
-class AudioListener : public ScriptWrappable, public RefCounted<AudioListener> {
+class AudioListener : public RefCountedWillBeGarbageCollectedFinalized<AudioListener>, public ScriptWrappable {
public:
- static PassRefPtr<AudioListener> create()
+ static PassRefPtrWillBeRawPtr<AudioListener> create()
{
- return adoptRef(new AudioListener());
+ return adoptRefWillBeNoop(new AudioListener());
}
+ virtual ~AudioListener();
// Position
void setPosition(float x, float y, float z) { setPosition(FloatPoint3D(x, y, z)); }
- void setPosition(const FloatPoint3D &position) { m_position = position; }
const FloatPoint3D& position() const { return m_position; }
- // Orientation
+ // Orientation and Up-vector
void setOrientation(float x, float y, float z, float upX, float upY, float upZ)
{
setOrientation(FloatPoint3D(x, y, z));
setUpVector(FloatPoint3D(upX, upY, upZ));
}
- void setOrientation(const FloatPoint3D &orientation) { m_orientation = orientation; }
const FloatPoint3D& orientation() const { return m_orientation; }
-
- // Up-vector
- void setUpVector(const FloatPoint3D &upVector) { m_upVector = upVector; }
const FloatPoint3D& upVector() const { return m_upVector; }
// Velocity
void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
- void setVelocity(const FloatPoint3D &velocity) { m_velocity = velocity; }
const FloatPoint3D& velocity() const { return m_velocity; }
// Doppler factor
- void setDopplerFactor(double dopplerFactor) { m_dopplerFactor = dopplerFactor; }
+ void setDopplerFactor(double);
double dopplerFactor() const { return m_dopplerFactor; }
// Speed of sound
- void setSpeedOfSound(double speedOfSound) { m_speedOfSound = speedOfSound; }
+ void setSpeedOfSound(double);
double speedOfSound() const { return m_speedOfSound; }
+ Mutex& listenerLock() { return m_listenerLock; }
+ void addPanner(PannerNode*);
+ void removePanner(PannerNode*);
+
+ void trace(Visitor*) { }
+
private:
AudioListener();
- // Position / Orientation
+ void setPosition(const FloatPoint3D&);
+ void setOrientation(const FloatPoint3D&);
+ void setUpVector(const FloatPoint3D&);
+ void setVelocity(const FloatPoint3D&);
+
+ void markPannersAsDirty(unsigned);
+
FloatPoint3D m_position;
FloatPoint3D m_orientation;
FloatPoint3D m_upVector;
-
FloatPoint3D m_velocity;
-
double m_dopplerFactor;
double m_speedOfSound;
+
+ // Synchronizes a panner's process() with changes to the listener's state.
+ mutable Mutex m_listenerLock;
+
+ // List of PannerNodes in the context.
+ Vector<PannerNode*> m_panners;
};
} // WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.idl
index 0011fe0f560..b9668f513fa 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioListener.idl
@@ -27,6 +27,7 @@
*/
[
+ WillBeGarbageCollected,
Conditional=WEB_AUDIO
] interface AudioListener {
attribute float dopplerFactor; // same as OpenAL (default 1.0)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.cpp
index 7e4b075f998..e48936c7335 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.cpp
@@ -48,6 +48,9 @@ AudioNode::AudioNode(AudioContext* context, float sampleRate)
, m_nodeType(NodeTypeUnknown)
, m_context(context)
, m_sampleRate(sampleRate)
+#if ENABLE(OILPAN)
+ , m_keepAlive(adoptPtr(new Persistent<AudioNode>(this)))
+#endif
, m_lastProcessingTime(-1)
, m_lastNonSilentTime(-1)
, m_normalRefCount(1) // start out with normal refCount == 1 (like WTF::RefCounted class)
@@ -139,12 +142,6 @@ void AudioNode::setNodeType(NodeType type)
#endif
}
-void AudioNode::lazyInitialize()
-{
- if (!isInitialized())
- initialize();
-}
-
void AudioNode::addInput(PassOwnPtr<AudioNodeInput> input)
{
m_inputs.append(input);
@@ -309,9 +306,7 @@ void AudioNode::setChannelCountMode(const String& mode, ExceptionState& exceptio
} else if (mode == "explicit") {
m_channelCountMode = Explicit;
} else {
- exceptionState.throwDOMException(
- InvalidStateError,
- "invalid mode '" + mode + "'; must be 'max', 'clamped-max', or 'explicit'.");
+ ASSERT_NOT_REACHED();
}
if (m_channelCountMode != oldMode)
@@ -340,9 +335,7 @@ void AudioNode::setChannelInterpretation(const String& interpretation, Exception
} else if (interpretation == "discrete") {
m_channelInterpretation = AudioBus::Discrete;
} else {
- exceptionState.throwDOMException(
- InvalidStateError,
- "invalid interpretation '" + interpretation + "'; must be 'speakers' or 'discrete'.");
+ ASSERT_NOT_REACHED();
}
}
@@ -478,6 +471,9 @@ void AudioNode::disableOutputsIfNecessary()
void AudioNode::ref(RefType refType)
{
+#if ENABLE(OILPAN)
+ ASSERT(m_keepAlive);
+#endif
switch (refType) {
case RefTypeNormal:
atomicIncrement(&m_normalRefCount);
@@ -531,7 +527,7 @@ void AudioNode::deref(RefType refType)
// Once AudioContext::uninitialize() is called there's no more chances for deleteMarkedNodes() to get called, so we call here.
// We can't call in AudioContext::~AudioContext() since it will never be called as long as any AudioNode is alive
// because AudioNodes keep a reference to the context.
- if (context()->isAudioThreadFinished())
+ if (!context()->isInitialized())
context()->deleteMarkedNodes();
}
@@ -592,6 +588,25 @@ void AudioNode::printNodeCounts()
#endif // DEBUG_AUDIONODE_REFERENCES
+void AudioNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_context);
+ EventTargetWithInlineData::trace(visitor);
+}
+
+#if ENABLE(OILPAN)
+void AudioNode::clearKeepAlive()
+{
+ // It is safe to drop the self-persistent when the ref count
+ // of an AudioNode reaches zero. At that point, the
+ // AudioNode is removed from the AudioContext and
+ // it cannot be reattached. Therefore, the reference count
+ // will not go above zero again.
+ ASSERT(m_keepAlive);
+ m_keepAlive = nullptr;
+}
+#endif
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.h
index 61fc047bdd5..fb63c918e40 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.h
@@ -26,7 +26,7 @@
#define AudioNode_h
#include "bindings/v8/ScriptWrappable.h"
-#include "core/events/EventTarget.h"
+#include "modules/EventTargetModules.h"
#include "platform/audio/AudioBus.h"
#include "wtf/Forward.h"
#include "wtf/OwnPtr.h"
@@ -50,7 +50,9 @@ class ExceptionState;
// An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.
-class AudioNode : public ScriptWrappable, public EventTargetWithInlineData {
+// AudioNode has its own ref-counting mechanism that uses RefTypes, so we cannot use RefCountedGarbageCollected.
+class AudioNode : public NoBaseWillBeGarbageCollectedFinalized<AudioNode>, public ScriptWrappable, public EventTargetWithInlineData {
+ WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioNode);
public:
enum { ProcessingSizeInFrames = 128 };
@@ -109,17 +111,12 @@ public:
// Called from context's audio thread.
virtual void process(size_t framesToProcess) = 0;
- // Resets DSP processing state (clears delay lines, filter memory, etc.)
- // Called from context's audio thread.
- virtual void reset() = 0;
-
// No significant resources should be allocated until initialize() is called.
// Processing may not occur until a node is initialized.
virtual void initialize();
virtual void uninitialize();
bool isInitialized() const { return m_isInitialized; }
- void lazyInitialize();
unsigned numberOfInputs() const { return m_inputs.size(); }
unsigned numberOfOutputs() const { return m_outputs.size(); }
@@ -181,8 +178,14 @@ public:
AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }
// EventTarget
- virtual const AtomicString& interfaceName() const OVERRIDE;
- virtual ExecutionContext* executionContext() const OVERRIDE;
+ virtual const AtomicString& interfaceName() const OVERRIDE FINAL;
+ virtual ExecutionContext* executionContext() const OVERRIDE FINAL;
+
+ virtual void trace(Visitor*) OVERRIDE;
+
+#if ENABLE(OILPAN)
+ void clearKeepAlive();
+#endif
protected:
// Inputs and outputs must be created before the AudioNode is initialized.
@@ -200,11 +203,23 @@ protected:
private:
volatile bool m_isInitialized;
NodeType m_nodeType;
- RefPtr<AudioContext> m_context;
+ RefPtrWillBeMember<AudioContext> m_context;
float m_sampleRate;
Vector<OwnPtr<AudioNodeInput> > m_inputs;
Vector<OwnPtr<AudioNodeOutput> > m_outputs;
+#if ENABLE(OILPAN)
+ // AudioNodes are in the oilpan heap but they are still reference counted at
+ // the same time. This is because we are not allowed to stop the audio
+ // thread and thus the audio thread cannot allocate objects in the oilpan
+ // heap.
+ // The m_keepAlive handle is used to keep a persistent reference to this
+ // AudioNode while someone still references it through a RefPtr.
+ GC_PLUGIN_IGNORE("http://crbug.com/353083")
+ OwnPtr<Persistent<AudioNode> > m_keepAlive;
+#endif
+
double m_lastProcessingTime;
double m_lastNonSilentTime;
@@ -220,8 +235,10 @@ private:
static int s_nodeCount[NodeTypeEnd];
#endif
- virtual void refEventTarget() OVERRIDE { ref(); }
- virtual void derefEventTarget() OVERRIDE { deref(); }
+#if !ENABLE(OILPAN)
+ virtual void refEventTarget() OVERRIDE FINAL { ref(); }
+ virtual void derefEventTarget() OVERRIDE FINAL { deref(); }
+#endif
protected:
unsigned m_channelCount;
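
The m_keepAlive comment above describes a self-persistent handle: the node pins itself in the garbage-collected heap while the legacy refcount is non-zero, and clearKeepAlive() drops the pin once that count reaches zero, after which the node can never be re-referenced. A toy model of the same idea in standard C++, with std::shared_ptr standing in for Persistent<AudioNode>; illustrative only:

    #include <cassert>
    #include <memory>

    class Node {
    public:
        static std::shared_ptr<Node> create()
        {
            std::shared_ptr<Node> node(new Node);
            node->m_keepAlive = node; // self-reference pins the object alive
            return node;
        }

        // Called once the external refcount reaches zero; the node cannot be
        // re-referenced after that, so dropping the pin here is safe.
        void clearKeepAlive()
        {
            assert(m_keepAlive);
            m_keepAlive.reset(); // may destroy the object, like `delete this`
        }

    private:
        Node() {}
        std::shared_ptr<Node> m_keepAlive;
    };

    int main()
    {
        std::shared_ptr<Node> node = Node::create();
        Node* raw = node.get();
        node.reset();          // external reference gone; the pin keeps it alive
        raw->clearKeepAlive(); // now the object destroys itself
    }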
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.idl
index 35d98ccf302..4ed4d40b0db 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNode.idl
@@ -22,23 +22,32 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum ChannelCountMode {
+ "max",
+ "clamped-max",
+ "explicit"
+};
+
+enum ChannelInterpretation {
+ "speakers",
+ "discrete"
+};
+
+// http://webaudio.github.io/web-audio-api/#idl-def-AudioNode
+
[
Conditional=WEB_AUDIO,
Custom=Wrap,
+ WillBeGarbageCollected,
] interface AudioNode : EventTarget {
- readonly attribute AudioContext context;
- readonly attribute unsigned long numberOfInputs;
- readonly attribute unsigned long numberOfOutputs;
-
- [RaisesException=Setter] attribute unsigned long channelCount;
-
- [RaisesException=Setter] attribute DOMString channelCountMode;
-
- [RaisesException=Setter] attribute DOMString channelInterpretation;
-
+ // FIXME: AudioNode argument should not be nullable
[RaisesException] void connect(AudioNode? destination, [Default=Undefined] optional unsigned long output, [Default=Undefined] optional unsigned long input);
-
- [RaisesException] void connect(AudioParam? destination, [Default=Undefined] optional unsigned long output);
-
+ [RaisesException] void connect(AudioParam destination, [Default=Undefined] optional unsigned long output);
[RaisesException] void disconnect([Default=Undefined] optional unsigned long output);
+ readonly attribute AudioContext context;
+ readonly attribute unsigned long numberOfInputs;
+ readonly attribute unsigned long numberOfOutputs;
+ [RaisesException=Setter] attribute unsigned long channelCount;
+ [RaisesException=Setter] attribute ChannelCountMode channelCountMode;
+ [RaisesException=Setter] attribute ChannelInterpretation channelInterpretation;
};
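
Typing channelCountMode and channelInterpretation as IDL enums is what justifies the ASSERT_NOT_REACHED() substitution in AudioNode.cpp earlier in this patch: the generated bindings validate the string against the enum's allowed values and never forward an unknown one, so an invalid value reaching WebCore would be a bindings bug rather than an author error. A simplified sketch of the binding-side check; names are illustrative:

    #include <cstdio>
    #include <string>

    static const char* const kChannelCountModes[] = { "max", "clamped-max", "explicit" };

    // Sketch of the generated binding for an enum-typed attribute: validation
    // happens here, so the C++ setter only ever sees a known value.
    bool setChannelCountModeBinding(const std::string& mode)
    {
        for (const char* allowed : kChannelCountModes) {
            if (mode == allowed) {
                // ... forward to AudioNode::setChannelCountMode(mode) ...
                return true;
            }
        }
        // Real bindings report this to script per Web IDL's enum conversion
        // rules; the implementation is never called with it.
        return false;
    }

    int main()
    {
        std::printf("%d\n", setChannelCountModeBinding("explicit"));     // 1
        std::printf("%d\n", setChannelCountModeBinding("quadraphonic")); // 0
    }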
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeInput.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeInput.h
index 9f171a26ecf..8952863d554 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeInput.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeInput.h
@@ -40,7 +40,7 @@ class AudioNodeOutput;
// In the case of multiple connections, the input will act as a unity-gain summing junction, mixing all the outputs.
// The number of channels of the input's bus is the maximum of the number of channels of all its connections.
-class AudioNodeInput : public AudioSummingJunction {
+class AudioNodeInput FINAL : public AudioSummingJunction {
public:
explicit AudioNodeInput(AudioNode*);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.cpp
index 9902699bb95..ef7a7ab2a01 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.cpp
@@ -146,11 +146,6 @@ unsigned AudioNodeOutput::renderingFanOutCount() const
return m_renderingFanOutCount;
}
-unsigned AudioNodeOutput::renderingParamFanOutCount() const
-{
- return m_renderingParamFanOutCount;
-}
-
void AudioNodeOutput::addInput(AudioNodeInput* input)
{
ASSERT(context()->isGraphOwner());
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.h
index 14e9272a306..31f9637ecc7 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioNodeOutput.h
@@ -62,10 +62,6 @@ public:
// Unlike fanOutCount() it will not change during the course of a render quantum.
unsigned renderingFanOutCount() const;
- // renderingParamFanOutCount() is the number of AudioParams that we're connected to during rendering.
- // Unlike paramFanOutCount() it will not change during the course of a render quantum.
- unsigned renderingParamFanOutCount() const;
-
// Must be called with the context's graph lock.
void disconnectAll();
@@ -144,8 +140,7 @@ private:
unsigned m_renderingFanOutCount;
unsigned m_renderingParamFanOutCount;
- HashSet<RefPtr<AudioParam> > m_params;
- typedef HashSet<RefPtr<AudioParam> >::iterator ParamsIterator;
+ WillBePersistentHeapHashSet<RefPtrWillBeMember<AudioParam> > m_params;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.cpp
index 8ff48e2ed9e..a356c93bd54 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.cpp
@@ -84,7 +84,7 @@ bool AudioParam::smooth()
m_smoothedValue = m_value;
else {
// Dezipper - exponential approach.
- m_smoothedValue += (m_value - m_smoothedValue) * m_smoothingConstant;
+ m_smoothedValue += (m_value - m_smoothedValue) * DefaultSmoothingConstant;
// If we get close enough then snap to actual value.
if (fabs(m_smoothedValue - m_value) < SnapThreshold) // FIXME: the threshold needs to be adjustable depending on range - but this is an OK general-purpose value.
@@ -96,7 +96,7 @@ bool AudioParam::smooth()
float AudioParam::finalValue()
{
- float value;
+ float value = m_value;
calculateFinalValues(&value, 1, false);
return value;
}
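
Two fixes in this hunk deserve a gloss: finalValue() now seeds its result with m_value, so an early return inside calculateFinalValues() can no longer leave an uninitialized float, and smooth() de-zippers with the shared DefaultSmoothingConstant now that the per-param constant is gone. The smoothing itself is a one-pole exponential approach that snaps once the value is close enough; a standalone sketch with illustrative constants, not Blink's values:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double kSmoothingConstant = 0.05; // fraction of the gap closed per step
        const double kSnapThreshold = 0.001;    // "close enough" cutoff

        double value = 1.0;    // target set by script
        double smoothed = 0.0; // value actually fed to the DSP

        for (int step = 0; step < 1000; ++step) {
            // Dezipper - exponential approach toward the target.
            smoothed += (value - smoothed) * kSmoothingConstant;
            if (std::fabs(smoothed - value) < kSnapThreshold) {
                smoothed = value; // snap to the exact value
                std::printf("converged after %d steps\n", step + 1);
                break;
            }
        }
    }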
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.h
index b6c30cbaeb8..ed25afabf79 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.h
@@ -43,14 +43,14 @@ namespace WebCore {
class AudioNodeOutput;
-class AudioParam : public RefCounted<AudioParam>, public ScriptWrappable, public AudioSummingJunction {
+class AudioParam FINAL : public RefCountedWillBeGarbageCollectedFinalized<AudioParam>, public ScriptWrappable, public AudioSummingJunction {
public:
static const double DefaultSmoothingConstant;
static const double SnapThreshold;
- static PassRefPtr<AudioParam> create(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
+ static PassRefPtrWillBeRawPtr<AudioParam> create(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
{
- return adoptRef(new AudioParam(context, name, defaultValue, minValue, maxValue, units));
+ return adoptRefWillBeNoop(new AudioParam(context, name, defaultValue, minValue, maxValue, units));
}
// AudioSummingJunction
@@ -83,12 +83,11 @@ public:
bool smooth();
void resetSmoothedValue() { m_smoothedValue = m_value; }
- void setSmoothingConstant(double k) { m_smoothingConstant = k; }
// Parameter automation.
void setValueAtTime(float value, double time) { m_timeline.setValueAtTime(value, time); }
void linearRampToValueAtTime(float value, double time) { m_timeline.linearRampToValueAtTime(value, time); }
- void exponentialRampToValueAtTime(float value, double time) { m_timeline.exponentialRampToValueAtTime(value, time); }
+ void exponentialRampToValueAtTime(float value, double time, ExceptionState& es) { m_timeline.exponentialRampToValueAtTime(value, time, es); }
void setTargetAtTime(float target, double time, double timeConstant) { m_timeline.setTargetAtTime(target, time, timeConstant); }
void setValueCurveAtTime(Float32Array* curve, double time, double duration) { m_timeline.setValueCurveAtTime(curve, time, duration); }
void cancelScheduledValues(double startTime) { m_timeline.cancelScheduledValues(startTime); }
@@ -103,6 +102,8 @@ public:
void connect(AudioNodeOutput*);
void disconnect(AudioNodeOutput*);
+ void trace(Visitor*) { }
+
protected:
AudioParam(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
: AudioSummingJunction(context)
@@ -113,7 +114,6 @@ protected:
, m_maxValue(maxValue)
, m_units(units)
, m_smoothedValue(defaultValue)
- , m_smoothingConstant(DefaultSmoothingConstant)
{
ScriptWrappable::init(this);
}
@@ -132,7 +132,6 @@ private:
// Smoothing (de-zippering)
double m_smoothedValue;
- double m_smoothingConstant;
AudioParamTimeline m_timeline;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.idl
index bb903874f3b..2d55ad31792 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParam.idl
@@ -27,6 +27,7 @@
*/
[
+ WillBeGarbageCollected,
Conditional=WEB_AUDIO
] interface AudioParam {
attribute float value;
@@ -42,7 +43,7 @@
// Parameter automation.
void setValueAtTime(float value, double time);
void linearRampToValueAtTime(float value, double time);
- void exponentialRampToValueAtTime(float value, double time);
+ [RaisesException] void exponentialRampToValueAtTime(float value, double time);
// Exponentially approach the target with a rate having the given time constant.
void setTargetAtTime(float target, double time, double timeConstant);
@@ -54,6 +55,4 @@
// Cancels all scheduled parameter changes with times greater than or equal to startTime.
void cancelScheduledValues(double startTime);
- [MeasureAs=LegacyWebAudio, ImplementedAs=setTargetAtTime] void setTargetValueAtTime(float targetValue, double time, double timeConstant);
-
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp
index 51d5293244a..eb107c04c1a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp
@@ -29,6 +29,8 @@
#include "modules/webaudio/AudioParamTimeline.h"
+#include "bindings/v8/ExceptionState.h"
+#include "core/dom/ExceptionCode.h"
#include "platform/audio/AudioUtilities.h"
#include "platform/FloatConversion.h"
#include "wtf/MathExtras.h"
@@ -40,22 +42,30 @@ namespace WebCore {
void AudioParamTimeline::setValueAtTime(float value, double time)
{
- insertEvent(ParamEvent(ParamEvent::SetValue, value, time, 0, 0, 0));
+ insertEvent(ParamEvent(ParamEvent::SetValue, value, time, 0, 0, nullptr));
}
void AudioParamTimeline::linearRampToValueAtTime(float value, double time)
{
- insertEvent(ParamEvent(ParamEvent::LinearRampToValue, value, time, 0, 0, 0));
+ insertEvent(ParamEvent(ParamEvent::LinearRampToValue, value, time, 0, 0, nullptr));
}
-void AudioParamTimeline::exponentialRampToValueAtTime(float value, double time)
+void AudioParamTimeline::exponentialRampToValueAtTime(float value, double time, ExceptionState& exceptionState)
{
- insertEvent(ParamEvent(ParamEvent::ExponentialRampToValue, value, time, 0, 0, 0));
+ ASSERT(isMainThread());
+ if (value <= 0) {
+ exceptionState.throwDOMException(
+ InvalidStateError,
+ "Target value for exponential ramp must be positive: " + String::number(value));
+ return;
+ }
+
+ insertEvent(ParamEvent(ParamEvent::ExponentialRampToValue, value, time, 0, 0, nullptr));
}
void AudioParamTimeline::setTargetAtTime(float target, double time, double timeConstant)
{
- insertEvent(ParamEvent(ParamEvent::SetTarget, target, time, timeConstant, 0, 0));
+ insertEvent(ParamEvent(ParamEvent::SetTarget, target, time, timeConstant, 0, nullptr));
}
void AudioParamTimeline::setValueCurveAtTime(Float32Array* curve, double time, double duration)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h
index 335a9ac2116..350d3bcb89f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h
@@ -46,7 +46,7 @@ public:
void setValueAtTime(float value, double time);
void linearRampToValueAtTime(float value, double time);
- void exponentialRampToValueAtTime(float value, double time);
+ void exponentialRampToValueAtTime(float value, double time, ExceptionState&);
void setTargetAtTime(float target, double time, double timeConstant);
void setValueCurveAtTime(Float32Array* curve, double time, double duration);
void cancelScheduledValues(double startTime);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.cpp
index ac8eb99bcbc..112994af876 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.cpp
@@ -28,18 +28,16 @@
#include "modules/webaudio/AudioProcessingEvent.h"
-#include "core/events/ThreadLocalEventNames.h"
-
namespace WebCore {
-PassRefPtr<AudioProcessingEvent> AudioProcessingEvent::create()
+PassRefPtrWillBeRawPtr<AudioProcessingEvent> AudioProcessingEvent::create()
{
- return adoptRef(new AudioProcessingEvent);
+ return adoptRefWillBeNoop(new AudioProcessingEvent);
}
-PassRefPtr<AudioProcessingEvent> AudioProcessingEvent::create(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer)
+PassRefPtrWillBeRawPtr<AudioProcessingEvent> AudioProcessingEvent::create(PassRefPtrWillBeRawPtr<AudioBuffer> inputBuffer, PassRefPtrWillBeRawPtr<AudioBuffer> outputBuffer, double playbackTime)
{
- return adoptRef(new AudioProcessingEvent(inputBuffer, outputBuffer));
+ return adoptRefWillBeNoop(new AudioProcessingEvent(inputBuffer, outputBuffer, playbackTime));
}
AudioProcessingEvent::AudioProcessingEvent()
@@ -47,10 +45,11 @@ AudioProcessingEvent::AudioProcessingEvent()
ScriptWrappable::init(this);
}
-AudioProcessingEvent::AudioProcessingEvent(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer)
+AudioProcessingEvent::AudioProcessingEvent(PassRefPtrWillBeRawPtr<AudioBuffer> inputBuffer, PassRefPtrWillBeRawPtr<AudioBuffer> outputBuffer, double playbackTime)
: Event(EventTypeNames::audioprocess, true, false)
, m_inputBuffer(inputBuffer)
, m_outputBuffer(outputBuffer)
+ , m_playbackTime(playbackTime)
{
ScriptWrappable::init(this);
}
@@ -64,6 +63,13 @@ const AtomicString& AudioProcessingEvent::interfaceName() const
return EventNames::AudioProcessingEvent;
}
+void AudioProcessingEvent::trace(Visitor* visitor)
+{
+ visitor->trace(m_inputBuffer);
+ visitor->trace(m_outputBuffer);
+ Event::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.h
index de2d78332f5..e938fe75748 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.h
@@ -25,7 +25,7 @@
#ifndef AudioProcessingEvent_h
#define AudioProcessingEvent_h
-#include "core/events/Event.h"
+#include "modules/EventModules.h"
#include "modules/webaudio/AudioBuffer.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefPtr.h"
@@ -34,24 +34,28 @@ namespace WebCore {
class AudioBuffer;
-class AudioProcessingEvent : public Event {
+class AudioProcessingEvent FINAL : public Event {
public:
- static PassRefPtr<AudioProcessingEvent> create();
- static PassRefPtr<AudioProcessingEvent> create(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer);
+ static PassRefPtrWillBeRawPtr<AudioProcessingEvent> create();
+ static PassRefPtrWillBeRawPtr<AudioProcessingEvent> create(PassRefPtrWillBeRawPtr<AudioBuffer> inputBuffer, PassRefPtrWillBeRawPtr<AudioBuffer> outputBuffer, double playbackTime);
virtual ~AudioProcessingEvent();
AudioBuffer* inputBuffer() { return m_inputBuffer.get(); }
AudioBuffer* outputBuffer() { return m_outputBuffer.get(); }
+ double playbackTime() const { return m_playbackTime; }
- virtual const AtomicString& interfaceName() const;
+ virtual const AtomicString& interfaceName() const OVERRIDE;
+
+ virtual void trace(Visitor*) OVERRIDE;
private:
AudioProcessingEvent();
- AudioProcessingEvent(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer);
+ AudioProcessingEvent(PassRefPtrWillBeRawPtr<AudioBuffer> inputBuffer, PassRefPtrWillBeRawPtr<AudioBuffer> outputBuffer, double playbackTime);
- RefPtr<AudioBuffer> m_inputBuffer;
- RefPtr<AudioBuffer> m_outputBuffer;
+ RefPtrWillBeMember<AudioBuffer> m_inputBuffer;
+ RefPtrWillBeMember<AudioBuffer> m_outputBuffer;
+ double m_playbackTime;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.idl b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.idl
index ed8f90a0a3a..2b812825dd0 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioProcessingEvent.idl
@@ -25,6 +25,7 @@
[
Conditional=WEB_AUDIO
] interface AudioProcessingEvent : Event {
+ readonly attribute double playbackTime;
readonly attribute AudioBuffer inputBuffer;
readonly attribute AudioBuffer outputBuffer;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.cpp
index f53e18cf44e..e96f2c89ff9 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.cpp
@@ -30,9 +30,9 @@
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
-#include "core/events/Event.h"
-#include "platform/audio/AudioUtilities.h"
+#include "modules/EventModules.h"
#include "modules/webaudio/AudioContext.h"
+#include "platform/audio/AudioUtilities.h"
#include <algorithm>
#include "wtf/MathExtras.h"
@@ -48,7 +48,6 @@ AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext* context, float
, m_startTime(0)
, m_endTime(UnknownTime)
, m_hasEndedListener(false)
- , m_stopCalled(false)
{
}
@@ -91,7 +90,6 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
if (m_playbackState == SCHEDULED_STATE) {
// Increment the active source count only if we're transitioning from SCHEDULED_STATE to PLAYING_STATE.
m_playbackState = PLAYING_STATE;
- context()->incrementActiveSourceCount();
}
quantumFrameOffset = startFrame > quantumStartFrame ? startFrame - quantumStartFrame : 0;
@@ -137,7 +135,6 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
return;
}
-
void AudioScheduledSourceNode::start(double when, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
@@ -157,28 +154,23 @@ void AudioScheduledSourceNode::stop(double when, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- if (m_stopCalled) {
- exceptionState.throwDOMException(
- InvalidStateError,
- "cannot call stop more than once.");
- } else if (m_playbackState == UNSCHEDULED_STATE) {
+ if (m_playbackState == UNSCHEDULED_STATE) {
exceptionState.throwDOMException(
InvalidStateError,
"cannot call stop without calling start first.");
} else {
- // This can only happen from the SCHEDULED_STATE or PLAYING_STATE. The UNSCHEDULED_STATE is
- // handled above, and the FINISHED_STATE is only reachable after stop() has been called, and
- // hence m_stopCalled is true. But that case is handled above.
+ // stop() can be called more than once; the last call takes effect, unless the source
+ // has already stopped because of an earlier call. No exception is thrown in any
+ // case.
when = max(0.0, when);
m_endTime = when;
- m_stopCalled = true;
}
}
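A standalone sketch (not Blink code) of the relaxed semantics just described: each call simply overwrites the scheduled end time, clamped to be non-negative, so the last call wins and no call throws once playback has been scheduled.

    #include <algorithm>

    struct ScheduledSource {
        double endTime = 1e30; // sentinel for "no stop scheduled yet"

        void stop(double when)
        {
            endTime = std::max(0.0, when); // later calls overwrite earlier ones
        }
    };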
-void AudioScheduledSourceNode::setOnended(PassRefPtr<EventListener> listener, DOMWrapperWorld* isolatedWorld)
+void AudioScheduledSourceNode::setOnended(PassRefPtr<EventListener> listener)
{
m_hasEndedListener = listener;
- setAttributeEventListener(EventTypeNames::ended, listener, isolatedWorld);
+ setAttributeEventListener(EventTypeNames::ended, listener);
}
void AudioScheduledSourceNode::finish()
@@ -187,7 +179,6 @@ void AudioScheduledSourceNode::finish()
// Let the context dereference this AudioNode.
context()->notifyNodeFinishedProcessing(this);
m_playbackState = FINISHED_STATE;
- context()->decrementActiveSourceCount();
}
if (m_hasEndedListener) {
@@ -211,8 +202,8 @@ AudioScheduledSourceNode::NotifyEndedTask::NotifyEndedTask(PassRefPtr<AudioSched
void AudioScheduledSourceNode::NotifyEndedTask::notifyEnded()
{
- RefPtr<Event> event = Event::create(EventTypeNames::ended);
- event->setTarget(m_scheduledNode);
+ RefPtrWillBeRawPtr<Event> event = Event::create(EventTypeNames::ended);
+ event->setTarget(m_scheduledNode.get());
m_scheduledNode->dispatchEvent(event.get());
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.h
index e89cd76bf0d..3942ecd3589 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioScheduledSourceNode.h
@@ -40,7 +40,7 @@ public:
// These are the possible states an AudioScheduledSourceNode can be in:
//
// UNSCHEDULED_STATE - Initial playback state. Created, but not yet scheduled.
- // SCHEDULED_STATE - Scheduled to play (via noteOn() or noteGrainOn()), but not yet playing.
+ // SCHEDULED_STATE - Scheduled to play (via start()), but not yet playing.
// PLAYING_STATE - Generating sound.
// FINISHED_STATE - Finished generating sound.
//
@@ -77,8 +77,8 @@ public:
bool isPlayingOrScheduled() const { return m_playbackState == PLAYING_STATE || m_playbackState == SCHEDULED_STATE; }
bool hasFinished() const { return m_playbackState == FINISHED_STATE; }
- EventListener* onended(DOMWrapperWorld* isolatedWorld) { return getAttributeEventListener(EventTypeNames::ended, isolatedWorld); }
- void setOnended(PassRefPtr<EventListener>, DOMWrapperWorld* isolatedWorld = 0);
+ EventListener* onended() { return getAttributeEventListener(EventTypeNames::ended); }
+ void setOnended(PassRefPtr<EventListener>);
protected:
// Get frame information for the current time quantum.
@@ -109,7 +109,6 @@ protected:
double m_endTime; // in seconds
bool m_hasEndedListener;
- bool m_stopCalled;
static const double UnknownTime;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioSourceNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioSourceNode.h
index b0883e10c5a..a8105b39993 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioSourceNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioSourceNode.h
@@ -41,8 +41,8 @@ public:
ScriptWrappable::init(this);
}
protected:
- virtual double tailTime() const OVERRIDE { return 0; }
- virtual double latencyTime() const OVERRIDE { return 0; }
+ virtual double tailTime() const OVERRIDE FINAL { return 0; }
+ virtual double latencyTime() const OVERRIDE FINAL { return 0; }
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/AudioSummingJunction.h b/chromium/third_party/WebKit/Source/modules/webaudio/AudioSummingJunction.h
index a2e789f04b2..ca21fe6cb02 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/AudioSummingJunction.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/AudioSummingJunction.h
@@ -26,6 +26,7 @@
#define AudioSummingJunction_h
#include "platform/audio/AudioBus.h"
+#include "platform/heap/Handle.h"
#include "wtf/HashSet.h"
#include "wtf/Vector.h"
@@ -54,23 +55,18 @@ public:
// Rendering code accesses its version of the current connections here.
unsigned numberOfRenderingConnections() const { return m_renderingOutputs.size(); }
AudioNodeOutput* renderingOutput(unsigned i) { return m_renderingOutputs[i]; }
- const AudioNodeOutput* renderingOutput(unsigned i) const { return m_renderingOutputs[i]; }
bool isConnected() const { return numberOfRenderingConnections() > 0; }
virtual bool canUpdateState() = 0;
virtual void didUpdate() = 0;
protected:
- RefPtr<AudioContext> m_context;
+ RefPtrWillBePersistent<AudioContext> m_context;
// m_outputs contains the AudioNodeOutputs representing current connections which are not disabled.
// The rendering code should never use this directly, but instead uses m_renderingOutputs.
HashSet<AudioNodeOutput*> m_outputs;
- // numberOfConnections() should never be called from the audio rendering thread.
- // Instead numberOfRenderingConnections() and renderingOutput() should be used.
- unsigned numberOfConnections() const { return m_outputs.size(); }
-
// m_renderingOutputs is a copy of m_outputs which will never be modified during the graph rendering on the audio thread.
// This is the list which is used by the rendering code.
// Whenever m_outputs is modified, the context is told so it can later update m_renderingOutputs from m_outputs at a safe time.
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.cpp
index 5136ae118cd..45b663be10b 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.cpp
@@ -40,73 +40,73 @@ namespace WebCore {
// settings of the Biquad.
static const double MaxBiquadDelayTime = 0.2;
-void BiquadDSPKernel::updateCoefficientsIfNecessary(bool useSmoothing, bool forceUpdate)
+void BiquadDSPKernel::updateCoefficientsIfNecessary()
{
- if (forceUpdate || biquadProcessor()->filterCoefficientsDirty()) {
- double value1;
- double value2;
+ if (biquadProcessor()->filterCoefficientsDirty()) {
+ double cutoffFrequency;
+ double Q;
double gain;
double detune; // in Cents
if (biquadProcessor()->hasSampleAccurateValues()) {
- value1 = biquadProcessor()->parameter1()->finalValue();
- value2 = biquadProcessor()->parameter2()->finalValue();
+ cutoffFrequency = biquadProcessor()->parameter1()->finalValue();
+ Q = biquadProcessor()->parameter2()->finalValue();
gain = biquadProcessor()->parameter3()->finalValue();
detune = biquadProcessor()->parameter4()->finalValue();
- } else if (useSmoothing) {
- value1 = biquadProcessor()->parameter1()->smoothedValue();
- value2 = biquadProcessor()->parameter2()->smoothedValue();
+ } else {
+ cutoffFrequency = biquadProcessor()->parameter1()->smoothedValue();
+ Q = biquadProcessor()->parameter2()->smoothedValue();
gain = biquadProcessor()->parameter3()->smoothedValue();
detune = biquadProcessor()->parameter4()->smoothedValue();
- } else {
- value1 = biquadProcessor()->parameter1()->value();
- value2 = biquadProcessor()->parameter2()->value();
- gain = biquadProcessor()->parameter3()->value();
- detune = biquadProcessor()->parameter4()->value();
}
- // Convert from Hertz to normalized frequency 0 -> 1.
- double nyquist = this->nyquist();
- double normalizedFrequency = value1 / nyquist;
+ updateCoefficients(cutoffFrequency, Q, gain, detune);
+ }
+}
- // Offset frequency by detune.
- if (detune)
- normalizedFrequency *= pow(2, detune / 1200);
+void BiquadDSPKernel::updateCoefficients(double cutoffFrequency, double Q, double gain, double detune)
+{
+ // Convert from Hertz to normalized frequency 0 -> 1.
+ double nyquist = this->nyquist();
+ double normalizedFrequency = cutoffFrequency / nyquist;
- // Configure the biquad with the new filter parameters for the appropriate type of filter.
- switch (biquadProcessor()->type()) {
- case BiquadProcessor::LowPass:
- m_biquad.setLowpassParams(normalizedFrequency, value2);
- break;
+ // Offset frequency by detune.
+ if (detune)
+ normalizedFrequency *= pow(2, detune / 1200);
- case BiquadProcessor::HighPass:
- m_biquad.setHighpassParams(normalizedFrequency, value2);
- break;
+ // Configure the biquad with the new filter parameters for the appropriate type of filter.
+ switch (biquadProcessor()->type()) {
+ case BiquadProcessor::LowPass:
+ m_biquad.setLowpassParams(normalizedFrequency, Q);
+ break;
- case BiquadProcessor::BandPass:
- m_biquad.setBandpassParams(normalizedFrequency, value2);
- break;
+ case BiquadProcessor::HighPass:
+ m_biquad.setHighpassParams(normalizedFrequency, Q);
+ break;
- case BiquadProcessor::LowShelf:
- m_biquad.setLowShelfParams(normalizedFrequency, gain);
- break;
+ case BiquadProcessor::BandPass:
+ m_biquad.setBandpassParams(normalizedFrequency, Q);
+ break;
- case BiquadProcessor::HighShelf:
- m_biquad.setHighShelfParams(normalizedFrequency, gain);
- break;
+ case BiquadProcessor::LowShelf:
+ m_biquad.setLowShelfParams(normalizedFrequency, gain);
+ break;
- case BiquadProcessor::Peaking:
- m_biquad.setPeakingParams(normalizedFrequency, value2, gain);
- break;
+ case BiquadProcessor::HighShelf:
+ m_biquad.setHighShelfParams(normalizedFrequency, gain);
+ break;
- case BiquadProcessor::Notch:
- m_biquad.setNotchParams(normalizedFrequency, value2);
- break;
+ case BiquadProcessor::Peaking:
+ m_biquad.setPeakingParams(normalizedFrequency, Q, gain);
+ break;
- case BiquadProcessor::Allpass:
- m_biquad.setAllpassParams(normalizedFrequency, value2);
- break;
- }
+ case BiquadProcessor::Notch:
+ m_biquad.setNotchParams(normalizedFrequency, Q);
+ break;
+
+ case BiquadProcessor::Allpass:
+ m_biquad.setAllpassParams(normalizedFrequency, Q);
+ break;
}
}
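A standalone sketch (not Blink code) of the normalization at the top of updateCoefficients(): the cutoff becomes a fraction of the Nyquist frequency, and the detune, expressed in cents (1200 per octave), then scales it by a power of two.

    #include <cmath>
    #include <cstdio>

    double normalizedFrequency(double cutoffHz, double sampleRate, double detuneCents)
    {
        double nyquist = sampleRate / 2;
        double normalized = cutoffHz / nyquist; // 0 -> 1 across the usable band
        if (detuneCents)
            normalized *= std::pow(2, detuneCents / 1200); // offset by detune
        return normalized;
    }

    int main()
    {
        // 440 Hz at 44.1 kHz, detuned up a full octave (1200 cents): ~0.039909.
        std::printf("%f\n", normalizedFrequency(440, 44100, 1200));
        return 0;
    }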
@@ -118,7 +118,14 @@ void BiquadDSPKernel::process(const float* source, float* destination, size_t fr
// FIXME: as an optimization, implement a way that a Biquad object can simply copy its internal filter coefficients from another Biquad object.
// Then re-factor this code to only run for the first BiquadDSPKernel of each BiquadProcessor.
- updateCoefficientsIfNecessary(true, false);
+
+ // The audio thread can't block on this lock; skip updating the coefficients for this block if
+ // necessary. We'll get them the next time around.
+ {
+ MutexTryLocker tryLocker(m_processLock);
+ if (tryLocker.locked())
+ updateCoefficientsIfNecessary();
+ }
m_biquad.process(source, destination, framesToProcess);
}
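A standalone sketch (not Blink code; WTF's MutexTryLocker is approximated with std::mutex) of the non-blocking pattern used above: the real-time audio thread never waits on the lock, it just renders with the previous coefficients whenever the main thread is busy updating them.

    #include <mutex>

    std::mutex processLock;

    void renderQuantum()
    {
        if (processLock.try_lock()) {
            // Coefficients are safe to refresh here.
            processLock.unlock();
        }
        // Either way, filter this block with whatever coefficients we have;
        // a skipped update is picked up on the next render quantum.
    }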
@@ -142,12 +149,33 @@ void BiquadDSPKernel::getFrequencyResponse(int nFrequencies,
for (int k = 0; k < nFrequencies; ++k)
frequency[k] = narrowPrecisionToFloat(frequencyHz[k] / nyquist);
- // We want to get the final values of the coefficients and compute
- // the response from that instead of some intermediate smoothed
- // set. Forcefully update the coefficients even if they are not
- // dirty.
+ double cutoffFrequency;
+ double Q;
+ double gain;
+ double detune; // in Cents
+
+ {
+ // Get a copy of the current biquad filter coefficients so we can update the biquad with
+ // these values. We need to synchronize with process() to prevent process() from updating
+ // the filter coefficients while we're trying to access them. process() will update
+ // them the next time around.
+ //
+ // The BiquadDSPKernel object here (along with its Biquad object) is for querying the
+ // frequency response and is NOT the same as the one in process(), which is used for
+ // performing the actual filtering. This one is created in
+ // BiquadProcessor::getFrequencyResponse for this purpose. Both, however, point to the same
+ // BiquadProcessor object.
+ //
+ // FIXME: Simplify this: crbug.com/390266
+ MutexLocker processLocker(m_processLock);
+
+ cutoffFrequency = biquadProcessor()->parameter1()->value();
+ Q = biquadProcessor()->parameter2()->value();
+ gain = biquadProcessor()->parameter3()->value();
+ detune = biquadProcessor()->parameter4()->value();
+ }
- updateCoefficientsIfNecessary(false, true);
+ updateCoefficients(cutoffFrequency, Q, gain, detune);
m_biquad.getFrequencyResponse(nFrequencies, frequency.data(), magResponse, phaseResponse);
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.h b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.h
index c2e8ac8cbb5..d0b12b332f0 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadDSPKernel.h
@@ -35,7 +35,7 @@ class BiquadProcessor;
// BiquadDSPKernel is an AudioDSPKernel and is responsible for filtering one channel of a BiquadProcessor using a Biquad object.
-class BiquadDSPKernel : public AudioDSPKernel {
+class BiquadDSPKernel FINAL : public AudioDSPKernel {
public:
explicit BiquadDSPKernel(BiquadProcessor* processor)
: AudioDSPKernel(processor)
@@ -43,8 +43,8 @@ public:
}
// AudioDSPKernel
- virtual void process(const float* source, float* dest, size_t framesToProcess);
- virtual void reset() { m_biquad.reset(); }
+ virtual void process(const float* source, float* dest, size_t framesToProcess) OVERRIDE;
+ virtual void reset() OVERRIDE { m_biquad.reset(); }
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
@@ -62,12 +62,13 @@ protected:
// To prevent audio glitches when parameters are changed,
// dezippering is used to slowly change the parameters.
- // |useSmoothing| implies that we want to update using the
- // smoothed values. Otherwise the final target values are
- // used. If |forceUpdate| is true, we update the coefficients even
- // if they are not dirty. (Used when computing the frequency
- // response.)
- void updateCoefficientsIfNecessary(bool useSmoothing, bool forceUpdate);
+ void updateCoefficientsIfNecessary();
+ // Update the biquad coefficients with the given parameters.
+ void updateCoefficients(double frequency, double Q, double gain, double detune);
+
+private:
+ // Synchronize process() with getting and setting the filter coefficients.
+ mutable Mutex m_processLock;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.cpp
index 1aa8f4a910a..7069d1f3f04 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.cpp
@@ -82,8 +82,6 @@ void BiquadFilterNode::setType(const String& type)
setType(BiquadProcessor::Notch);
else if (type == "allpass")
setType(BiquadProcessor::Allpass);
- else
- ASSERT_NOT_REACHED();
}
bool BiquadFilterNode::setType(unsigned type)
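A standalone sketch (not Blink code) of the string mapping after this change: with the new BiquadFilterType IDL enum, the generated bindings validate the string before the setter runs, so an unrecognized name can simply be ignored rather than asserted on.

    #include <map>
    #include <string>

    enum FilterType { LowPass, HighPass, BandPass, LowShelf, HighShelf, Peaking, Notch, Allpass };

    void setFilterType(const std::string& name, FilterType& type)
    {
        static const std::map<std::string, FilterType> kTypes = {
            { "lowpass", LowPass },   { "highpass", HighPass },   { "bandpass", BandPass },
            { "lowshelf", LowShelf }, { "highshelf", HighShelf }, { "peaking", Peaking },
            { "notch", Notch },       { "allpass", Allpass },
        };
        auto it = kTypes.find(name);
        if (it != kTypes.end()) // Unknown names are silently ignored.
            type = it->second;
    }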
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.h
index 0ba4681c304..94bd76e90d2 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.h
@@ -32,7 +32,7 @@ namespace WebCore {
class AudioParam;
-class BiquadFilterNode : public AudioBasicProcessorNode {
+class BiquadFilterNode FINAL : public AudioBasicProcessorNode {
public:
// These must be defined as in the .idl file and must match those in the BiquadProcessor class.
enum {
@@ -46,13 +46,12 @@ public:
ALLPASS = 7
};
- static PassRefPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new BiquadFilterNode(context, sampleRate));
+ return adoptRefWillBeNoop(new BiquadFilterNode(context, sampleRate));
}
String type() const;
- bool setType(unsigned); // Returns true on success.
void setType(const String&);
AudioParam* frequency() { return biquadProcessor()->parameter1(); }
@@ -70,6 +69,7 @@ private:
BiquadFilterNode(AudioContext*, float sampleRate);
BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
+ bool setType(unsigned); // Returns true on success.
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.idl
index 2422dfb1a98..9783ab2a22b 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadFilterNode.idl
@@ -22,20 +22,21 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum BiquadFilterType {
+ "lowpass",
+ "highpass",
+ "bandpass",
+ "lowshelf",
+ "highshelf",
+ "peaking",
+ "notch",
+ "allpass"
+};
+
[
Conditional=WEB_AUDIO
] interface BiquadFilterNode : AudioNode {
- // Filter type.
- const unsigned short LOWPASS = 0;
- const unsigned short HIGHPASS = 1;
- const unsigned short BANDPASS = 2;
- const unsigned short LOWSHELF = 3;
- const unsigned short HIGHSHELF = 4;
- const unsigned short PEAKING = 5;
- const unsigned short NOTCH = 6;
- const unsigned short ALLPASS = 7;
-
- [Custom=Setter] attribute DOMString type;
+ attribute BiquadFilterType type;
readonly attribute AudioParam frequency; // in Hertz
readonly attribute AudioParam detune; // in Cents
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.cpp
index 2655abeb215..f2410ce928d 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.cpp
@@ -35,10 +35,10 @@ namespace WebCore {
BiquadProcessor::BiquadProcessor(AudioContext* context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
, m_type(LowPass)
- , m_parameter1(0)
- , m_parameter2(0)
- , m_parameter3(0)
- , m_parameter4(0)
+ , m_parameter1(nullptr)
+ , m_parameter2(nullptr)
+ , m_parameter3(nullptr)
+ , m_parameter4(nullptr)
, m_filterCoefficientsDirty(true)
, m_hasSampleAccurateValues(false)
{
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.h b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.h
index 6c3e3307094..7647586c36e 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/BiquadProcessor.h
@@ -36,7 +36,7 @@ namespace WebCore {
// BiquadProcessor is an AudioDSPKernelProcessor which uses Biquad objects to implement several common filters.
-class BiquadProcessor : public AudioDSPKernelProcessor {
+class BiquadProcessor FINAL : public AudioDSPKernelProcessor {
public:
enum FilterType {
LowPass = 0,
@@ -53,9 +53,9 @@ public:
virtual ~BiquadProcessor();
- virtual PassOwnPtr<AudioDSPKernel> createKernel();
+ virtual PassOwnPtr<AudioDSPKernel> createKernel() OVERRIDE;
- virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
+ virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) OVERRIDE;
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
@@ -80,10 +80,10 @@ public:
private:
FilterType m_type;
- RefPtr<AudioParam> m_parameter1;
- RefPtr<AudioParam> m_parameter2;
- RefPtr<AudioParam> m_parameter3;
- RefPtr<AudioParam> m_parameter4;
+ RefPtrWillBePersistent<AudioParam> m_parameter1;
+ RefPtrWillBePersistent<AudioParam> m_parameter2;
+ RefPtrWillBePersistent<AudioParam> m_parameter3;
+ RefPtrWillBePersistent<AudioParam> m_parameter4;
// so DSP kernels know when to re-compute coefficients
bool m_filterCoefficientsDirty;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.cpp
index 43b86b4d389..96befdeb438 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.cpp
@@ -40,12 +40,12 @@ const unsigned DefaultNumberOfOutputChannels = 1;
namespace WebCore {
-PassRefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext* context, float sampleRate, unsigned numberOfInputs)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext* context, float sampleRate, unsigned numberOfInputs)
{
if (!numberOfInputs || numberOfInputs > AudioContext::maxNumberOfChannels())
- return 0;
+ return nullptr;
- return adoptRef(new ChannelMergerNode(context, sampleRate, numberOfInputs));
+ return adoptRefWillBeNoop(new ChannelMergerNode(context, sampleRate, numberOfInputs));
}
ChannelMergerNode::ChannelMergerNode(AudioContext* context, float sampleRate, unsigned numberOfInputs)
@@ -76,29 +76,33 @@ void ChannelMergerNode::process(size_t framesToProcess)
// Merge all the channels from all the inputs into one output.
unsigned outputChannelIndex = 0;
+ unsigned maxAllowedOutputChannels = output->numberOfChannels();
+
for (unsigned i = 0; i < numberOfInputs(); ++i) {
AudioNodeInput* input = this->input(i);
if (input->isConnected()) {
unsigned numberOfInputChannels = input->bus()->numberOfChannels();
- // Merge channels from this particular input.
+ // Merge channels from this particular input, but be careful not to exceed the number of
+ // output channels. (This can happen if there are many inputs, each
+ // containing many channels.)
for (unsigned j = 0; j < numberOfInputChannels; ++j) {
- AudioChannel* inputChannel = input->bus()->channel(j);
- AudioChannel* outputChannel = output->bus()->channel(outputChannelIndex);
- outputChannel->copyFrom(inputChannel);
+ if (outputChannelIndex < maxAllowedOutputChannels) {
+ AudioChannel* inputChannel = input->bus()->channel(j);
+ AudioChannel* outputChannel = output->bus()->channel(outputChannelIndex);
+ outputChannel->copyFrom(inputChannel);
- ++outputChannelIndex;
+ ++outputChannelIndex;
+ }
}
}
+ if (outputChannelIndex >= maxAllowedOutputChannels)
+ break;
}
ASSERT(outputChannelIndex == output->numberOfChannels());
}
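A standalone sketch (not Blink code) of the clamped merge loop above: channels from each connected input are copied into consecutive output channels, and anything beyond the output's capacity is dropped instead of indexing out of bounds.

    #include <vector>

    typedef std::vector<float> Channel;

    void mergeInputs(const std::vector<std::vector<Channel> >& inputs,
                     std::vector<Channel>& output)
    {
        size_t outputIndex = 0;
        for (size_t i = 0; i < inputs.size(); ++i) {
            for (size_t j = 0; j < inputs[i].size(); ++j) {
                if (outputIndex < output.size())
                    output[outputIndex++] = inputs[i][j]; // copyFrom() in the real code
            }
            if (outputIndex >= output.size())
                break; // all remaining input channels are dropped
        }
    }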
-void ChannelMergerNode::reset()
-{
-}
-
// Any time a connection or disconnection happens on any of our inputs, we potentially need to change the
// number of channels of our output.
void ChannelMergerNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
@@ -113,6 +117,9 @@ void ChannelMergerNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
numberOfOutputChannels += input->numberOfChannels();
}
+ // If the actual number of channels exceeds the max allowed, just drop the excess.
+ numberOfOutputChannels = std::min(numberOfOutputChannels, AudioContext::maxNumberOfChannels());
+
// Set the correct number of channels on the output
AudioNodeOutput* output = this->output(0);
ASSERT(output);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.h
index a15b6fa5122..ea62eff3175 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelMergerNode.h
@@ -36,16 +36,15 @@ namespace WebCore {
class AudioContext;
-class ChannelMergerNode : public AudioNode {
+class ChannelMergerNode FINAL : public AudioNode {
public:
- static PassRefPtr<ChannelMergerNode> create(AudioContext*, float sampleRate, unsigned numberOfInputs);
+ static PassRefPtrWillBeRawPtr<ChannelMergerNode> create(AudioContext*, float sampleRate, unsigned numberOfInputs);
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// Called in the audio thread (pre-rendering task) when the number of channels for an input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
+ virtual void checkNumberOfChannelsForInput(AudioNodeInput*) OVERRIDE;
private:
unsigned m_desiredNumberOfOutputChannels;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.cpp
index 4009ba49373..18a5e1a6848 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.cpp
@@ -34,12 +34,12 @@
namespace WebCore {
-PassRefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
{
if (!numberOfOutputs || numberOfOutputs > AudioContext::maxNumberOfChannels())
- return 0;
+ return nullptr;
- return adoptRef(new ChannelSplitterNode(context, sampleRate, numberOfOutputs));
+ return adoptRefWillBeNoop(new ChannelSplitterNode(context, sampleRate, numberOfOutputs));
}
ChannelSplitterNode::ChannelSplitterNode(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
@@ -79,10 +79,6 @@ void ChannelSplitterNode::process(size_t framesToProcess)
}
}
-void ChannelSplitterNode::reset()
-{
-}
-
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.h
index c3c4a668e6a..32bdbd78833 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ChannelSplitterNode.h
@@ -32,13 +32,12 @@ namespace WebCore {
class AudioContext;
-class ChannelSplitterNode : public AudioNode {
+class ChannelSplitterNode FINAL : public AudioNode {
public:
- static PassRefPtr<ChannelSplitterNode> create(AudioContext*, float sampleRate, unsigned numberOfOutputs);
+ static PassRefPtrWillBeRawPtr<ChannelSplitterNode> create(AudioContext*, float sampleRate, unsigned numberOfOutputs);
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
private:
virtual double tailTime() const OVERRIDE { return 0; }
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.cpp
index cef9bca9c78..a0eac4de379 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.cpp
@@ -28,6 +28,8 @@
#include "modules/webaudio/ConvolverNode.h"
+#include "bindings/v8/ExceptionState.h"
+#include "core/dom/ExceptionCode.h"
#include "platform/audio/Reverb.h"
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioContext.h"
@@ -90,13 +92,6 @@ void ConvolverNode::process(size_t framesToProcess)
}
}
-void ConvolverNode::reset()
-{
- MutexLocker locker(m_processLock);
- if (m_reverb.get())
- m_reverb->reset();
-}
-
void ConvolverNode::initialize()
{
if (isInitialized())
@@ -114,13 +109,21 @@ void ConvolverNode::uninitialize()
AudioNode::uninitialize();
}
-void ConvolverNode::setBuffer(AudioBuffer* buffer)
+void ConvolverNode::setBuffer(AudioBuffer* buffer, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (!buffer)
return;
+ if (buffer->sampleRate() != context()->sampleRate()) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ "The buffer sample rate of " + String::number(buffer->sampleRate())
+ + " does not match the context rate of " + String::number(context()->sampleRate())
+ + " Hz.");
+ }
+
unsigned numberOfChannels = buffer->numberOfChannels();
size_t bufferLength = buffer->length();
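A standalone sketch (not Blink code) of the new guard: an impulse-response buffer recorded at a different rate would otherwise be convolved at the wrong pitch, so a mismatch is reported as an error.

    #include <sstream>
    #include <stdexcept>

    void checkImpulseResponseRate(float bufferRate, float contextRate)
    {
        if (bufferRate != contextRate) {
            std::ostringstream message;
            message << "The buffer sample rate of " << bufferRate
                    << " does not match the context rate of " << contextRate << " Hz.";
            throw std::runtime_error(message.str()); // throwDOMException in the real code
        }
    }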
@@ -176,6 +179,12 @@ double ConvolverNode::latencyTime() const
return std::numeric_limits<double>::infinity();
}
+void ConvolverNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_buffer);
+ AudioNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.h
index e5e33e62691..02ad70b3a8f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.h
@@ -33,30 +33,32 @@
namespace WebCore {
class AudioBuffer;
+class ExceptionState;
class Reverb;
-class ConvolverNode : public AudioNode {
+class ConvolverNode FINAL : public AudioNode {
public:
- static PassRefPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new ConvolverNode(context, sampleRate));
+ return adoptRefWillBeNoop(new ConvolverNode(context, sampleRate));
}
virtual ~ConvolverNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
- virtual void initialize();
- virtual void uninitialize();
+ virtual void process(size_t framesToProcess) OVERRIDE;
+ virtual void initialize() OVERRIDE;
+ virtual void uninitialize() OVERRIDE;
// Impulse responses
- void setBuffer(AudioBuffer*);
+ void setBuffer(AudioBuffer*, ExceptionState&);
AudioBuffer* buffer();
bool normalize() const { return m_normalize; }
void setNormalize(bool normalize) { m_normalize = normalize; }
+ virtual void trace(Visitor*) OVERRIDE;
+
private:
ConvolverNode(AudioContext*, float sampleRate);
@@ -64,7 +66,7 @@ private:
virtual double latencyTime() const OVERRIDE;
OwnPtr<Reverb> m_reverb;
- RefPtr<AudioBuffer> m_buffer;
+ RefPtrWillBeMember<AudioBuffer> m_buffer;
// This synchronizes dynamic changes to the convolution impulse response with process().
mutable Mutex m_processLock;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.idl
index f0339170133..fe0544b33eb 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ConvolverNode.idl
@@ -26,6 +26,6 @@
[
Conditional=WEB_AUDIO
] interface ConvolverNode : AudioNode {
- attribute AudioBuffer buffer;
+ [RaisesException=Setter] attribute AudioBuffer buffer;
attribute boolean normalize;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DOMWindowWebAudio.h b/chromium/third_party/WebKit/Source/modules/webaudio/DOMWindowWebAudio.h
new file mode 100644
index 00000000000..03f01d5c617
--- /dev/null
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DOMWindowWebAudio.h
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO: This is a dummy header file required by the generated binding code.
+// This file should be removed after fixing the code generator.
+
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.cpp
index a6b93bf28d8..50f1df3254d 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.cpp
@@ -34,8 +34,6 @@
#include "platform/Logging.h"
#include "wtf/MainThread.h"
-const unsigned EnabledInputChannels = 2;
-
namespace WebCore {
DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext* context)
@@ -83,22 +81,6 @@ void DefaultAudioDestinationNode::createDestination()
m_destination = AudioDestination::create(*this, m_inputDeviceId, m_numberOfInputChannels, channelCount(), hardwareSampleRate);
}
-void DefaultAudioDestinationNode::enableInput(const String& inputDeviceId)
-{
- ASSERT(isMainThread());
- if (m_numberOfInputChannels != EnabledInputChannels) {
- m_numberOfInputChannels = EnabledInputChannels;
- m_inputDeviceId = inputDeviceId;
-
- if (isInitialized()) {
- // Re-create destination.
- m_destination->stop();
- createDestination();
- m_destination->start();
- }
- }
-}
-
void DefaultAudioDestinationNode::startRendering()
{
ASSERT(isInitialized());
@@ -122,12 +104,7 @@ void DefaultAudioDestinationNode::setChannelCount(unsigned long channelCount, Ex
if (!maxChannelCount() || channelCount > maxChannelCount()) {
exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToSet(
- "channelCount",
- "AudioDestinationNode",
- "channel count (" + String::number(channelCount)
- + ") must be between 1 and "
- + String::number(maxChannelCount()) + "."));
+ ExceptionMessages::indexOutsideRange<unsigned>("channel count", channelCount, 1, ExceptionMessages::InclusiveBound, maxChannelCount(), ExceptionMessages::InclusiveBound));
return;
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.h
index 8c0506d221e..409d31c4c50 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DefaultAudioDestinationNode.h
@@ -34,11 +34,11 @@ namespace WebCore {
class AudioContext;
class ExceptionState;
-class DefaultAudioDestinationNode : public AudioDestinationNode {
+class DefaultAudioDestinationNode FINAL : public AudioDestinationNode {
public:
- static PassRefPtr<DefaultAudioDestinationNode> create(AudioContext* context)
+ static PassRefPtrWillBeRawPtr<DefaultAudioDestinationNode> create(AudioContext* context)
{
- return adoptRef(new DefaultAudioDestinationNode(context));
+ return adoptRefWillBeNoop(new DefaultAudioDestinationNode(context));
}
virtual ~DefaultAudioDestinationNode();
@@ -49,7 +49,6 @@ public:
virtual void setChannelCount(unsigned long, ExceptionState&) OVERRIDE;
// AudioDestinationNode
- virtual void enableInput(const String& inputDeviceId) OVERRIDE;
virtual void startRendering() OVERRIDE;
virtual unsigned long maxChannelCount() const OVERRIDE;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.cpp
index 05b8cc566fd..2301325f95b 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.cpp
@@ -29,6 +29,7 @@
#include "modules/webaudio/DelayDSPKernel.h"
#include "platform/audio/AudioUtilities.h"
+#include "wtf/MathExtras.h"
#include <algorithm>
using namespace std;
@@ -45,8 +46,8 @@ DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
return;
m_maxDelayTime = processor->maxDelayTime();
- ASSERT(m_maxDelayTime >= 0);
- if (m_maxDelayTime < 0)
+ ASSERT(m_maxDelayTime >= 0 && !std::isnan(m_maxDelayTime));
+ if (m_maxDelayTime < 0 || std::isnan(m_maxDelayTime))
return;
m_buffer.allocate(bufferLengthForDelay(m_maxDelayTime, processor->sampleRate()));
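A standalone sketch (not Blink code; the buffer sizing is an assumed approximation) of why isnan() is now tested explicitly: every comparison with NaN is false, so "m_maxDelayTime < 0" alone would let a NaN through to the allocation.

    #include <cmath>
    #include <cstddef>

    bool isValidMaxDelayTime(double maxDelayTime)
    {
        return !std::isnan(maxDelayTime) && maxDelayTime >= 0;
    }

    // Approximation of the real bufferLengthForDelay(): one frame per sample
    // of delay, plus one for interpolation headroom.
    size_t bufferLengthForDelay(double maxDelayTime, double sampleRate)
    {
        return static_cast<size_t>(maxDelayTime * sampleRate) + 1;
    }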
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.h b/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.h
index 320350ebb5c..a0fbd661015 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DelayDSPKernel.h
@@ -32,7 +32,7 @@ namespace WebCore {
class DelayProcessor;
-class DelayDSPKernel : public AudioDelayDSPKernel {
+class DelayDSPKernel FINAL : public AudioDelayDSPKernel {
public:
explicit DelayDSPKernel(DelayProcessor*);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.cpp
index 7a8817ee82e..55c6337635e 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.cpp
@@ -31,6 +31,7 @@
#include "bindings/v8/ExceptionMessages.h"
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
+#include "wtf/MathExtras.h"
namespace WebCore {
@@ -40,7 +41,7 @@ DelayNode::DelayNode(AudioContext* context, float sampleRate, double maxDelayTim
: AudioBasicProcessorNode(context, sampleRate)
{
ScriptWrappable::init(this);
- if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime) {
+ if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime || std::isnan(maxDelayTime)) {
exceptionState.throwDOMException(
NotSupportedError,
"max delay time (" + String::number(maxDelayTime)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.h
index 14ee7e4deab..8324d73d2e9 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DelayNode.h
@@ -34,11 +34,11 @@ namespace WebCore {
class AudioParam;
class ExceptionState;
-class DelayNode : public AudioBasicProcessorNode {
+class DelayNode FINAL : public AudioBasicProcessorNode {
public:
- static PassRefPtr<DelayNode> create(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionState& exceptionState)
+ static PassRefPtrWillBeRawPtr<DelayNode> create(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionState& exceptionState)
{
- return adoptRef(new DelayNode(context, sampleRate, maxDelayTime, exceptionState));
+ return adoptRefWillBeNoop(new DelayNode(context, sampleRate, maxDelayTime, exceptionState));
}
AudioParam* delayTime();
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DelayProcessor.h b/chromium/third_party/WebKit/Source/modules/webaudio/DelayProcessor.h
index 779aa7682e6..63b8af26cb7 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DelayProcessor.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DelayProcessor.h
@@ -34,19 +34,19 @@ namespace WebCore {
class AudioDSPKernel;
-class DelayProcessor : public AudioDSPKernelProcessor {
+class DelayProcessor FINAL : public AudioDSPKernelProcessor {
public:
DelayProcessor(AudioContext*, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
virtual ~DelayProcessor();
- virtual PassOwnPtr<AudioDSPKernel> createKernel();
+ virtual PassOwnPtr<AudioDSPKernel> createKernel() OVERRIDE;
AudioParam* delayTime() const { return m_delayTime.get(); }
double maxDelayTime() { return m_maxDelayTime; }
private:
- RefPtr<AudioParam> m_delayTime;
+ RefPtrWillBePersistent<AudioParam> m_delayTime;
double m_maxDelayTime;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.cpp
index 6700897e469..476e8db5dab 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.cpp
@@ -85,11 +85,6 @@ void DynamicsCompressorNode::process(size_t framesToProcess)
m_reduction->setValue(reduction);
}
-void DynamicsCompressorNode::reset()
-{
- m_dynamicsCompressor->reset();
-}
-
void DynamicsCompressorNode::initialize()
{
if (isInitialized())
@@ -118,6 +113,17 @@ double DynamicsCompressorNode::latencyTime() const
return m_dynamicsCompressor->latencyTime();
}
+void DynamicsCompressorNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_threshold);
+ visitor->trace(m_knee);
+ visitor->trace(m_ratio);
+ visitor->trace(m_reduction);
+ visitor->trace(m_attack);
+ visitor->trace(m_release);
+ AudioNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.h
index 413c0b08e26..068c6696fdb 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/DynamicsCompressorNode.h
@@ -33,20 +33,19 @@ namespace WebCore {
class DynamicsCompressor;
-class DynamicsCompressorNode : public AudioNode {
+class DynamicsCompressorNode FINAL : public AudioNode {
public:
- static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new DynamicsCompressorNode(context, sampleRate));
+ return adoptRefWillBeNoop(new DynamicsCompressorNode(context, sampleRate));
}
virtual ~DynamicsCompressorNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
- virtual void initialize();
- virtual void uninitialize();
+ virtual void process(size_t framesToProcess) OVERRIDE;
+ virtual void initialize() OVERRIDE;
+ virtual void uninitialize() OVERRIDE;
// Static compression curve parameters.
AudioParam* threshold() { return m_threshold.get(); }
@@ -58,6 +57,8 @@ public:
// Amount by which the compressor is currently compressing the signal in decibels.
AudioParam* reduction() { return m_reduction.get(); }
+ virtual void trace(Visitor*) OVERRIDE;
+
private:
virtual double tailTime() const OVERRIDE;
virtual double latencyTime() const OVERRIDE;
@@ -65,12 +66,12 @@ private:
DynamicsCompressorNode(AudioContext*, float sampleRate);
OwnPtr<DynamicsCompressor> m_dynamicsCompressor;
- RefPtr<AudioParam> m_threshold;
- RefPtr<AudioParam> m_knee;
- RefPtr<AudioParam> m_ratio;
- RefPtr<AudioParam> m_reduction;
- RefPtr<AudioParam> m_attack;
- RefPtr<AudioParam> m_release;
+ RefPtrWillBeMember<AudioParam> m_threshold;
+ RefPtrWillBeMember<AudioParam> m_knee;
+ RefPtrWillBeMember<AudioParam> m_ratio;
+ RefPtrWillBeMember<AudioParam> m_reduction;
+ RefPtrWillBeMember<AudioParam> m_attack;
+ RefPtrWillBeMember<AudioParam> m_release;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.cpp
index 22a4c8377bc..03156476ef3 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.cpp
@@ -79,12 +79,6 @@ void GainNode::process(size_t framesToProcess)
}
}
-void GainNode::reset()
-{
- // Snap directly to desired gain.
- m_lastGain = gain()->value();
-}
-
// FIXME: this can go away when we do mixing with gain directly in summing junction of AudioNodeInput
//
// As soon as we know the channel count of our input, we can lazily initialize.
@@ -114,6 +108,12 @@ void GainNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
AudioNode::checkNumberOfChannelsForInput(input);
}
+void GainNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_gain);
+ AudioNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.h
index 361f581a040..a0886b918e8 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/GainNode.h
@@ -37,23 +37,24 @@ class AudioContext;
// GainNode is an AudioNode with one input and one output which applies a gain (volume) change to the audio signal.
// De-zippering (smoothing) is applied when the gain value is changed dynamically.
-class GainNode : public AudioNode {
+class GainNode FINAL : public AudioNode {
public:
- static PassRefPtr<GainNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<GainNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new GainNode(context, sampleRate));
+ return adoptRefWillBeNoop(new GainNode(context, sampleRate));
}
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// Called in the main thread when the number of channels for the input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
+ virtual void checkNumberOfChannelsForInput(AudioNodeInput*) OVERRIDE;
// JavaScript interface
AudioParam* gain() { return m_gain.get(); }
+ virtual void trace(Visitor*) OVERRIDE;
+
private:
virtual double tailTime() const OVERRIDE { return 0; }
virtual double latencyTime() const OVERRIDE { return 0; }
@@ -61,7 +62,7 @@ private:
GainNode(AudioContext*, float sampleRate);
float m_lastGain; // for de-zippering
- RefPtr<AudioParam> m_gain;
+ RefPtrWillBeMember<AudioParam> m_gain;
AudioFloatArray m_sampleAccurateGainValues;
};
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.cpp
index 50b574ab9cd..ac3f7443b93 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.cpp
@@ -28,6 +28,7 @@
#include "modules/webaudio/MediaElementAudioSourceNode.h"
+#include "core/html/HTMLMediaElement.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "platform/Logging.h"
@@ -40,9 +41,9 @@ const unsigned maxSampleRate = 192000;
namespace WebCore {
-PassRefPtr<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext* context, HTMLMediaElement* mediaElement)
+PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext* context, HTMLMediaElement* mediaElement)
{
- return adoptRef(new MediaElementAudioSourceNode(context, mediaElement));
+ return adoptRefWillBeNoop(new MediaElementAudioSourceNode(context, mediaElement));
}
MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context, HTMLMediaElement* mediaElement)
@@ -62,7 +63,9 @@ MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context,
MediaElementAudioSourceNode::~MediaElementAudioSourceNode()
{
+#if !ENABLE(OILPAN)
m_mediaElement->setAudioSourceNode(0);
+#endif
uninitialize();
}
@@ -135,10 +138,6 @@ void MediaElementAudioSourceNode::process(size_t numberOfFrames)
}
}
-void MediaElementAudioSourceNode::reset()
-{
-}
-
void MediaElementAudioSourceNode::lock()
{
ref();
@@ -151,6 +150,13 @@ void MediaElementAudioSourceNode::unlock()
deref();
}
+void MediaElementAudioSourceNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_mediaElement);
+ AudioSourceNode::trace(visitor);
+ AudioSourceProviderClient::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.h
index 27ae39272e3..c5888027f3c 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaElementAudioSourceNode.h
@@ -27,7 +27,6 @@
#if ENABLE(WEB_AUDIO)
-#include "core/html/HTMLMediaElement.h"
#include "platform/audio/AudioSourceProviderClient.h"
#include "platform/audio/MultiChannelResampler.h"
#include "modules/webaudio/AudioSourceNode.h"
@@ -38,24 +37,27 @@
namespace WebCore {
class AudioContext;
+class HTMLMediaElement;
-class MediaElementAudioSourceNode : public AudioSourceNode, public AudioSourceProviderClient {
+class MediaElementAudioSourceNode FINAL : public AudioSourceNode, public AudioSourceProviderClient {
+ WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(MediaElementAudioSourceNode);
public:
- static PassRefPtr<MediaElementAudioSourceNode> create(AudioContext*, HTMLMediaElement*);
+ static PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> create(AudioContext*, HTMLMediaElement*);
virtual ~MediaElementAudioSourceNode();
HTMLMediaElement* mediaElement() { return m_mediaElement.get(); }
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// AudioSourceProviderClient
- virtual void setFormat(size_t numberOfChannels, float sampleRate);
+ virtual void setFormat(size_t numberOfChannels, float sampleRate) OVERRIDE;
- void lock();
- void unlock();
+ virtual void lock() OVERRIDE;
+ virtual void unlock() OVERRIDE;
+
+ virtual void trace(Visitor*) OVERRIDE;
private:
MediaElementAudioSourceNode(AudioContext*, HTMLMediaElement*);
@@ -63,7 +65,7 @@ private:
// As an audio source, we will never propagate silence.
virtual bool propagatesSilence() const OVERRIDE { return false; }
- RefPtr<HTMLMediaElement> m_mediaElement;
+ RefPtrWillBeMember<HTMLMediaElement> m_mediaElement;
Mutex m_processLock;
unsigned m_sourceNumberOfChannels;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.cpp
index 34d883a0d42..0e26912194d 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.cpp
@@ -28,18 +28,18 @@
#include "modules/webaudio/MediaStreamAudioDestinationNode.h"
-#include "core/platform/mediastream/MediaStreamCenter.h"
-#include "core/platform/mediastream/RTCPeerConnectionHandler.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "platform/UUID.h"
+#include "platform/mediastream/MediaStreamCenter.h"
+#include "public/platform/WebRTCPeerConnectionHandler.h"
#include "wtf/Locker.h"
namespace WebCore {
-PassRefPtr<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext* context, size_t numberOfChannels)
+PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext* context, size_t numberOfChannels)
{
- return adoptRef(new MediaStreamAudioDestinationNode(context, numberOfChannels));
+ return adoptRefWillBeNoop(new MediaStreamAudioDestinationNode(context, numberOfChannels));
}
MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* context, size_t numberOfChannels)
@@ -54,18 +54,13 @@ MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* c
audioSources.append(m_source);
MediaStreamSourceVector videoSources;
m_stream = MediaStream::create(context->executionContext(), MediaStreamDescriptor::create(audioSources, videoSources));
- MediaStreamCenter::instance().didCreateMediaStream(m_stream->descriptor());
+ MediaStreamCenter::instance().didCreateMediaStreamAndTracks(m_stream->descriptor());
m_source->setAudioFormat(numberOfChannels, context->sampleRate());
initialize();
}
-MediaStreamSource* MediaStreamAudioDestinationNode::mediaStreamSource()
-{
- return m_source.get();
-}
-
MediaStreamAudioDestinationNode::~MediaStreamAudioDestinationNode()
{
uninitialize();
@@ -77,10 +72,6 @@ void MediaStreamAudioDestinationNode::process(size_t numberOfFrames)
m_source->consumeAudio(m_mixBus.get(), numberOfFrames);
}
-void MediaStreamAudioDestinationNode::reset()
-{
-}
-
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.h
index 7ac3862f7c0..ef8f39340d8 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioDestinationNode.h
@@ -37,19 +37,16 @@ namespace WebCore {
class AudioContext;
-class MediaStreamAudioDestinationNode : public AudioBasicInspectorNode {
+class MediaStreamAudioDestinationNode FINAL : public AudioBasicInspectorNode {
public:
- static PassRefPtr<MediaStreamAudioDestinationNode> create(AudioContext*, size_t numberOfChannels);
+ static PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> create(AudioContext*, size_t numberOfChannels);
virtual ~MediaStreamAudioDestinationNode();
MediaStream* stream() { return m_stream.get(); }
// AudioNode.
- virtual void process(size_t framesToProcess);
- virtual void reset();
-
- MediaStreamSource* mediaStreamSource();
+ virtual void process(size_t framesToProcess) OVERRIDE;
private:
MediaStreamAudioDestinationNode(AudioContext*, size_t numberOfChannels);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.cpp
index 58b91678423..2664e68c5e9 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.cpp
@@ -35,12 +35,12 @@
namespace WebCore {
-PassRefPtr<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, AudioSourceProvider* audioSourceProvider)
+PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, PassOwnPtr<AudioSourceProvider> audioSourceProvider)
{
- return adoptRef(new MediaStreamAudioSourceNode(context, mediaStream, audioTrack, audioSourceProvider));
+ return adoptRefWillBeNoop(new MediaStreamAudioSourceNode(context, mediaStream, audioTrack, audioSourceProvider));
}
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, AudioSourceProvider* audioSourceProvider)
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, PassOwnPtr<AudioSourceProvider> audioSourceProvider)
: AudioSourceNode(context, context->sampleRate())
, m_mediaStream(mediaStream)
, m_audioTrack(audioTrack)
@@ -113,8 +113,12 @@ void MediaStreamAudioSourceNode::process(size_t numberOfFrames)
}
}
-void MediaStreamAudioSourceNode::reset()
+void MediaStreamAudioSourceNode::trace(Visitor* visitor)
{
+ visitor->trace(m_mediaStream);
+ visitor->trace(m_audioTrack);
+ AudioSourceNode::trace(visitor);
+ AudioSourceProviderClient::trace(visitor);
}
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.h
index 625690bc5e1..42f3f43f2fd 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/MediaStreamAudioSourceNode.h
@@ -39,32 +39,34 @@ namespace WebCore {
class AudioContext;
-class MediaStreamAudioSourceNode : public AudioSourceNode, public AudioSourceProviderClient {
+class MediaStreamAudioSourceNode FINAL : public AudioSourceNode, public AudioSourceProviderClient {
+ WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(MediaStreamAudioSourceNode);
public:
- static PassRefPtr<MediaStreamAudioSourceNode> create(AudioContext*, MediaStream*, MediaStreamTrack*, AudioSourceProvider*);
+ static PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> create(AudioContext*, MediaStream*, MediaStreamTrack*, PassOwnPtr<AudioSourceProvider>);
virtual ~MediaStreamAudioSourceNode();
MediaStream* mediaStream() { return m_mediaStream.get(); }
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
// AudioSourceProviderClient
- virtual void setFormat(size_t numberOfChannels, float sampleRate);
+ virtual void setFormat(size_t numberOfChannels, float sampleRate) OVERRIDE;
- AudioSourceProvider* audioSourceProvider() const { return m_audioSourceProvider; }
+ AudioSourceProvider* audioSourceProvider() const { return m_audioSourceProvider.get(); }
+
+ virtual void trace(Visitor*) OVERRIDE;
private:
- MediaStreamAudioSourceNode(AudioContext*, MediaStream*, MediaStreamTrack*, AudioSourceProvider*);
+ MediaStreamAudioSourceNode(AudioContext*, MediaStream*, MediaStreamTrack*, PassOwnPtr<AudioSourceProvider>);
// As an audio source, we will never propagate silence.
virtual bool propagatesSilence() const OVERRIDE { return false; }
- RefPtr<MediaStream> m_mediaStream;
- RefPtr<MediaStreamTrack> m_audioTrack;
- AudioSourceProvider* m_audioSourceProvider;
+ RefPtrWillBeMember<MediaStream> m_mediaStream;
+ RefPtrWillBeMember<MediaStreamTrack> m_audioTrack;
+ OwnPtr<AudioSourceProvider> m_audioSourceProvider;
Mutex m_processLock;
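
Switching the provider argument from a raw AudioSourceProvider* to PassOwnPtr<AudioSourceProvider> moves ownership into the node, which stores it in an OwnPtr. A small sketch of the hand-off, using a hypothetical Consumer class; PassOwnPtr is the pre-C++11 WTF analogue of passing std::unique_ptr by value:

    class Consumer {
    public:
        explicit Consumer(PassOwnPtr<AudioSourceProvider> provider)
            : m_provider(provider) { } // ownership now lives in m_provider
    private:
        OwnPtr<AudioSourceProvider> m_provider;
    };

    // Caller side (MyProvider is hypothetical):
    //   OwnPtr<AudioSourceProvider> provider = adoptPtr(new MyProvider);
    //   Consumer consumer(provider.release()); // caller's pointer becomes null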
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OWNERS b/chromium/third_party/WebKit/Source/modules/webaudio/OWNERS
index d591a3eaa09..3cd6baeb1da 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OWNERS
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OWNERS
@@ -1 +1,2 @@
kbr@chromium.org
+rtoy@chromium.org
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.cpp
index 45f73e94e73..a2a72d1e27b 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.cpp
@@ -28,18 +28,16 @@
#include "modules/webaudio/OfflineAudioCompletionEvent.h"
-#include "core/events/ThreadLocalEventNames.h"
-
namespace WebCore {
-PassRefPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create()
+PassRefPtrWillBeRawPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create()
{
- return adoptRef(new OfflineAudioCompletionEvent);
+ return adoptRefWillBeNoop(new OfflineAudioCompletionEvent);
}
-PassRefPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create(PassRefPtr<AudioBuffer> renderedBuffer)
+PassRefPtrWillBeRawPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create(PassRefPtrWillBeRawPtr<AudioBuffer> renderedBuffer)
{
- return adoptRef(new OfflineAudioCompletionEvent(renderedBuffer));
+ return adoptRefWillBeNoop(new OfflineAudioCompletionEvent(renderedBuffer));
}
OfflineAudioCompletionEvent::OfflineAudioCompletionEvent()
@@ -47,7 +45,7 @@ OfflineAudioCompletionEvent::OfflineAudioCompletionEvent()
ScriptWrappable::init(this);
}
-OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(PassRefPtr<AudioBuffer> renderedBuffer)
+OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(PassRefPtrWillBeRawPtr<AudioBuffer> renderedBuffer)
: Event(EventTypeNames::complete, true, false)
, m_renderedBuffer(renderedBuffer)
{
@@ -63,6 +61,12 @@ const AtomicString& OfflineAudioCompletionEvent::interfaceName() const
return EventNames::OfflineAudioCompletionEvent;
}
+void OfflineAudioCompletionEvent::trace(Visitor* visitor)
+{
+ visitor->trace(m_renderedBuffer);
+ Event::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.h b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.h
index c994c628a71..da5b8582a1d 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioCompletionEvent.h
@@ -25,7 +25,7 @@
#ifndef OfflineAudioCompletionEvent_h
#define OfflineAudioCompletionEvent_h
-#include "core/events/Event.h"
+#include "modules/EventModules.h"
#include "modules/webaudio/AudioBuffer.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefPtr.h"
@@ -34,22 +34,24 @@ namespace WebCore {
class AudioBuffer;
-class OfflineAudioCompletionEvent : public Event {
+class OfflineAudioCompletionEvent FINAL : public Event {
public:
- static PassRefPtr<OfflineAudioCompletionEvent> create();
- static PassRefPtr<OfflineAudioCompletionEvent> create(PassRefPtr<AudioBuffer> renderedBuffer);
+ static PassRefPtrWillBeRawPtr<OfflineAudioCompletionEvent> create();
+ static PassRefPtrWillBeRawPtr<OfflineAudioCompletionEvent> create(PassRefPtrWillBeRawPtr<AudioBuffer> renderedBuffer);
virtual ~OfflineAudioCompletionEvent();
AudioBuffer* renderedBuffer() { return m_renderedBuffer.get(); }
- virtual const AtomicString& interfaceName() const;
+ virtual const AtomicString& interfaceName() const OVERRIDE;
+
+ virtual void trace(Visitor*) OVERRIDE;
private:
OfflineAudioCompletionEvent();
- explicit OfflineAudioCompletionEvent(PassRefPtr<AudioBuffer> renderedBuffer);
+ explicit OfflineAudioCompletionEvent(PassRefPtrWillBeRawPtr<AudioBuffer> renderedBuffer);
- RefPtr<AudioBuffer> m_renderedBuffer;
+ RefPtrWillBeMember<AudioBuffer> m_renderedBuffer;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.cpp
index 4cf7947dfe1..bed82c16d9a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.cpp
@@ -36,34 +36,52 @@
namespace WebCore {
-PassRefPtr<OfflineAudioContext> OfflineAudioContext::create(ExecutionContext* context, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+PassRefPtrWillBeRawPtr<OfflineAudioContext> OfflineAudioContext::create(ExecutionContext* context, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
// FIXME: add support for workers.
if (!context || !context->isDocument()) {
exceptionState.throwDOMException(
NotSupportedError,
"Workers are not supported.");
- return 0;
+ return nullptr;
}
Document* document = toDocument(context);
if (!numberOfFrames) {
exceptionState.throwDOMException(SyntaxError, "number of frames cannot be zero.");
- return 0;
+ return nullptr;
}
- if (numberOfChannels > 10) {
- exceptionState.throwDOMException(SyntaxError, "number of channels (" + String::number(numberOfChannels) + ") exceeds maximum (10).");
- return 0;
+ if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
+ exceptionState.throwDOMException(
+ IndexSizeError,
+ ExceptionMessages::indexOutsideRange<unsigned>(
+ "number of channels",
+ numberOfChannels,
+ 0,
+ ExceptionMessages::InclusiveBound,
+ AudioContext::maxNumberOfChannels(),
+ ExceptionMessages::InclusiveBound));
+ return nullptr;
}
if (!isSampleRateRangeGood(sampleRate)) {
exceptionState.throwDOMException(SyntaxError, "sample rate (" + String::number(sampleRate) + ") must be in the range 44100-96000 Hz.");
- return 0;
+ return nullptr;
+ }
+
+ RefPtrWillBeRawPtr<OfflineAudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new OfflineAudioContext(document, numberOfChannels, numberOfFrames, sampleRate)));
+
+ if (!audioContext->destination()) {
+ exceptionState.throwDOMException(
+ NotSupportedError,
+ "OfflineAudioContext(" + String::number(numberOfChannels)
+ + ", " + String::number(numberOfFrames)
+ + ", " + String::number(sampleRate)
+ + ")");
}
- RefPtr<OfflineAudioContext> audioContext(adoptRef(new OfflineAudioContext(document, numberOfChannels, numberOfFrames, sampleRate)));
audioContext->suspendIfNeeded();
return audioContext.release();
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.h b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.h
index 4cd6928da3e..6434069a4c9 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.h
@@ -31,9 +31,9 @@ namespace WebCore {
class ExceptionState;
-class OfflineAudioContext : public AudioContext {
+class OfflineAudioContext FINAL : public AudioContext {
public:
- static PassRefPtr<OfflineAudioContext> create(ExecutionContext*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
+ static PassRefPtrWillBeRawPtr<OfflineAudioContext> create(ExecutionContext*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
virtual ~OfflineAudioContext();
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.idl b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.idl
index 5f5f8ac3ee4..5d5ec0c357d 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioContext.idl
@@ -23,6 +23,7 @@
*/
[
+ WillBeGarbageCollected,
Conditional=WEB_AUDIO,
Constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate),
ConstructorCallWith=ExecutionContext,
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.cpp
index 055458216a6..4bf2fc0a5bb 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.cpp
@@ -96,6 +96,11 @@ void OfflineAudioDestinationNode::offlineRender()
if (!m_renderBus.get())
return;
+ bool isAudioContextInitialized = context()->isInitialized();
+ ASSERT(isAudioContextInitialized);
+ if (!isAudioContextInitialized)
+ return;
+
bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numberOfChannels();
ASSERT(channelsMatch);
if (!channelsMatch)
@@ -106,15 +111,6 @@ void OfflineAudioDestinationNode::offlineRender()
if (!isRenderBusAllocated)
return;
- // Synchronize with HRTFDatabaseLoader.
- // The database must be loaded before we can proceed.
- HRTFDatabaseLoader* loader = context()->hrtfDatabaseLoader();
- ASSERT(loader);
- if (!loader)
- return;
-
- loader->waitForLoaderThreadCompletion();
-
// Break up the render target into smaller "render quantize" sized pieces.
// Render until we're finished.
size_t framesToProcess = m_renderTarget->length();
@@ -157,6 +153,12 @@ void OfflineAudioDestinationNode::notifyComplete()
context()->fireCompletionEvent();
}
+void OfflineAudioDestinationNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_renderTarget);
+ AudioDestinationNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
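
offlineRender() consumes the render target in fixed "render quantum" chunks; Web Audio processes audio in 128-frame blocks. Schematically, assuming a hypothetical renderQuantum() callback standing in for the engine's per-quantum work:

    const size_t renderQuantumSize = 128; // Web Audio's processing block size
    size_t framesToProcess = renderTargetLength;
    size_t frameOffset = 0;
    while (framesToProcess) {
        size_t framesThisTime = std::min(framesToProcess, renderQuantumSize);
        renderQuantum(frameOffset, framesThisTime); // hypothetical per-quantum callback
        frameOffset += framesThisTime;
        framesToProcess -= framesThisTime;
    }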
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.h
index a2f2a615417..c2ee3ddb26a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OfflineAudioDestinationNode.h
@@ -36,11 +36,11 @@ namespace WebCore {
class AudioBus;
class AudioContext;
-class OfflineAudioDestinationNode : public AudioDestinationNode {
+class OfflineAudioDestinationNode FINAL : public AudioDestinationNode {
public:
- static PassRefPtr<OfflineAudioDestinationNode> create(AudioContext* context, AudioBuffer* renderTarget)
+ static PassRefPtrWillBeRawPtr<OfflineAudioDestinationNode> create(AudioContext* context, AudioBuffer* renderTarget)
{
- return adoptRef(new OfflineAudioDestinationNode(context, renderTarget));
+ return adoptRefWillBeNoop(new OfflineAudioDestinationNode(context, renderTarget));
}
virtual ~OfflineAudioDestinationNode();
@@ -50,10 +50,11 @@ public:
virtual void uninitialize() OVERRIDE;
// AudioDestinationNode
- virtual void enableInput(const String&) OVERRIDE { }
virtual void startRendering() OVERRIDE;
- virtual float sampleRate() const { return m_renderTarget->sampleRate(); }
+ virtual float sampleRate() const OVERRIDE { return m_renderTarget->sampleRate(); }
+
+ virtual void trace(Visitor*) OVERRIDE;
private:
class OfflineRenderingTask;
@@ -62,7 +63,7 @@ private:
OfflineAudioDestinationNode(AudioContext*, AudioBuffer* renderTarget);
// This AudioNode renders into this AudioBuffer.
- RefPtr<AudioBuffer> m_renderTarget;
+ RefPtrWillBeMember<AudioBuffer> m_renderTarget;
// Temporary AudioBus for each render quantum.
RefPtr<AudioBus> m_renderBus;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.cpp
index 9790a2eef25..7a6be3d0ed4 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.cpp
@@ -43,9 +43,9 @@ namespace WebCore {
using namespace VectorMath;
-PassRefPtr<OscillatorNode> OscillatorNode::create(AudioContext* context, float sampleRate)
+PassRefPtrWillBeRawPtr<OscillatorNode> OscillatorNode::create(AudioContext* context, float sampleRate)
{
- return adoptRef(new OscillatorNode(context, sampleRate));
+ return adoptRefWillBeNoop(new OscillatorNode(context, sampleRate));
}
OscillatorNode::OscillatorNode(AudioContext* context, float sampleRate)
@@ -107,8 +107,6 @@ void OscillatorNode::setType(const String& type)
setType(SAWTOOTH);
else if (type == "triangle")
setType(TRIANGLE);
- else
- ASSERT_NOT_REACHED();
}
bool OscillatorNode::setType(unsigned type)
@@ -118,22 +116,22 @@ bool OscillatorNode::setType(unsigned type)
switch (type) {
case SINE: {
- DEFINE_STATIC_REF(PeriodicWave, periodicWaveSine, (PeriodicWave::createSine(sampleRate)));
+ DEFINE_STATIC_REF_WILL_BE_PERSISTENT(PeriodicWave, periodicWaveSine, (PeriodicWave::createSine(sampleRate)));
periodicWave = periodicWaveSine;
break;
}
case SQUARE: {
- DEFINE_STATIC_REF(PeriodicWave, periodicWaveSquare, (PeriodicWave::createSquare(sampleRate)));
+ DEFINE_STATIC_REF_WILL_BE_PERSISTENT(PeriodicWave, periodicWaveSquare, (PeriodicWave::createSquare(sampleRate)));
periodicWave = periodicWaveSquare;
break;
}
case SAWTOOTH: {
- DEFINE_STATIC_REF(PeriodicWave, periodicWaveSawtooth, (PeriodicWave::createSawtooth(sampleRate)));
+ DEFINE_STATIC_REF_WILL_BE_PERSISTENT(PeriodicWave, periodicWaveSawtooth, (PeriodicWave::createSawtooth(sampleRate)));
periodicWave = periodicWaveSawtooth;
break;
}
case TRIANGLE: {
- DEFINE_STATIC_REF(PeriodicWave, periodicWaveTriangle, (PeriodicWave::createTriangle(sampleRate)));
+ DEFINE_STATIC_REF_WILL_BE_PERSISTENT(PeriodicWave, periodicWaveTriangle, (PeriodicWave::createTriangle(sampleRate)));
periodicWave = periodicWaveTriangle;
break;
}
@@ -328,11 +326,6 @@ void OscillatorNode::process(size_t framesToProcess)
outputBus->clearSilentFlag();
}
-void OscillatorNode::reset()
-{
- m_virtualReadIndex = 0;
-}
-
void OscillatorNode::setPeriodicWave(PeriodicWave* periodicWave)
{
ASSERT(isMainThread());
@@ -348,6 +341,14 @@ bool OscillatorNode::propagatesSilence() const
return !isPlayingOrScheduled() || hasFinished() || !m_periodicWave.get();
}
+void OscillatorNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_frequency);
+ visitor->trace(m_detune);
+ visitor->trace(m_periodicWave);
+ AudioScheduledSourceNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
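
DEFINE_STATIC_REF_WILL_BE_PERSISTENT builds each basic PeriodicWave once and keeps it alive for the lifetime of the process, so all oscillators share the same band-limited tables; under Oilpan the handle becomes a Persistent<> so the GC never collects it. The non-Oilpan shape is roughly:

    // Rough non-Oilpan equivalent of the macro: create once, never release.
    // Note the cache is keyed only by first use, so it assumes a single sample rate.
    PeriodicWave* sharedSineWave(float sampleRate)
    {
        static PeriodicWave* wave = PeriodicWave::createSine(sampleRate).leakRef();
        return wave;
    }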
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.h
index a4d25390644..be18d7d94ff 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.h
@@ -40,7 +40,7 @@ class PeriodicWave;
// OscillatorNode is an audio generator of periodic waveforms.
-class OscillatorNode : public AudioScheduledSourceNode {
+class OscillatorNode FINAL : public AudioScheduledSourceNode {
public:
// The waveform type.
// These must be defined as in the .idl file.
@@ -52,17 +52,15 @@ public:
CUSTOM = 4
};
- static PassRefPtr<OscillatorNode> create(AudioContext*, float sampleRate);
+ static PassRefPtrWillBeRawPtr<OscillatorNode> create(AudioContext*, float sampleRate);
virtual ~OscillatorNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ virtual void process(size_t framesToProcess) OVERRIDE;
String type() const;
- bool setType(unsigned); // Returns true on success.
void setType(const String&);
AudioParam* frequency() { return m_frequency.get(); }
@@ -70,9 +68,13 @@ public:
void setPeriodicWave(PeriodicWave*);
+ virtual void trace(Visitor*) OVERRIDE;
+
private:
OscillatorNode(AudioContext*, float sampleRate);
+ bool setType(unsigned); // Returns true on success.
+
// Returns true if there are sample-accurate timeline parameter changes.
bool calculateSampleAccuratePhaseIncrements(size_t framesToProcess);
@@ -82,10 +84,10 @@ private:
unsigned short m_type;
// Frequency value in Hertz.
- RefPtr<AudioParam> m_frequency;
+ RefPtrWillBeMember<AudioParam> m_frequency;
// Detune value (deviating from the frequency) in Cents.
- RefPtr<AudioParam> m_detune;
+ RefPtrWillBeMember<AudioParam> m_detune;
bool m_firstRender;
@@ -100,7 +102,7 @@ private:
AudioFloatArray m_phaseIncrements;
AudioFloatArray m_detuneValues;
- RefPtr<PeriodicWave> m_periodicWave;
+ RefPtrWillBeMember<PeriodicWave> m_periodicWave;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.idl
index 7cbe29dbd30..823d20ed521 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/OscillatorNode.idl
@@ -22,27 +22,20 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum OscillatorType {
+ "sine",
+ "square",
+ "sawtooth",
+ "triangle",
+ "custom"
+};
+
// OscillatorNode is an audio generator of periodic waveforms.
[
Conditional=WEB_AUDIO
] interface OscillatorNode : AudioSourceNode {
- // Type constants.
- const unsigned short SINE = 0;
- const unsigned short SQUARE = 1;
- const unsigned short SAWTOOTH = 2;
- const unsigned short TRIANGLE = 3;
- const unsigned short CUSTOM = 4;
-
- [Custom=Setter] attribute DOMString type;
-
- // Playback state constants.
- const unsigned short UNSCHEDULED_STATE = 0;
- const unsigned short SCHEDULED_STATE = 1;
- const unsigned short PLAYING_STATE = 2;
- const unsigned short FINISHED_STATE = 3;
-
- readonly attribute unsigned short playbackState;
+ attribute OscillatorType type;
readonly attribute AudioParam frequency; // in Hertz
readonly attribute AudioParam detune; // in Cents
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.cpp
index fcebe1273dc..029c73cf16c 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.cpp
@@ -49,9 +49,24 @@ static void fixNANs(double &x)
PannerNode::PannerNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
, m_panningModel(Panner::PanningModelHRTF)
+ , m_distanceModel(DistanceEffect::ModelInverse)
+ , m_position(0, 0, 0)
+ , m_orientation(1, 0, 0)
+ , m_velocity(0, 0, 0)
+ , m_isAzimuthElevationDirty(true)
+ , m_isDistanceConeGainDirty(true)
+ , m_isDopplerRateDirty(true)
, m_lastGain(-1.0)
+ , m_cachedAzimuth(0)
+ , m_cachedElevation(0)
+ , m_cachedDistanceConeGain(1.0f)
+ , m_cachedDopplerRate(1)
, m_connectionCount(0)
{
+ // Load the HRTF database asynchronously so we don't block the JavaScript thread while creating the HRTF database.
+ // The HRTF panner will return zeroes until the database is loaded.
+ m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context->sampleRate());
+
ScriptWrappable::init(this);
addInput(adoptPtr(new AudioNodeInput(this)));
addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
@@ -61,13 +76,6 @@ PannerNode::PannerNode(AudioContext* context, float sampleRate)
m_channelCountMode = ClampedMax;
m_channelInterpretation = AudioBus::Speakers;
- m_distanceGain = AudioParam::create(context, "distanceGain", 1.0, 0.0, 1.0);
- m_coneGain = AudioParam::create(context, "coneGain", 1.0, 0.0, 1.0);
-
- m_position = FloatPoint3D(0, 0, 0);
- m_orientation = FloatPoint3D(1, 0, 0);
- m_velocity = FloatPoint3D(0, 0, 0);
-
setNodeType(NodeTypePanner);
initialize();
@@ -85,8 +93,12 @@ void PannerNode::pullInputs(size_t framesToProcess)
if (m_connectionCount != context()->connectionCount()) {
m_connectionCount = context()->connectionCount();
- // Recursively go through all nodes connected to us.
- notifyAudioSourcesConnectedToNode(this);
+ // A map for keeping track of whether we have visited a node. This prevents feedback loops
+ // from recursing infinitely. See crbug.com/331446.
+ HashMap<AudioNode*, bool> visitedNodes;
+
+ // Recursively go through all nodes connected to us
+ notifyAudioSourcesConnectedToNode(this, visitedNodes);
}
AudioNode::pullInputs(framesToProcess);
@@ -102,23 +114,35 @@ void PannerNode::process(size_t framesToProcess)
}
AudioBus* source = input(0)->bus();
-
if (!source) {
destination->zero();
return;
}
// The audio thread can't block on this lock, so we call tryLock() instead.
- MutexTryLocker tryLocker(m_pannerLock);
- if (tryLocker.locked()) {
+ MutexTryLocker tryLocker(m_processLock);
+ MutexTryLocker tryListenerLocker(listener()->listenerLock());
+
+ if (tryLocker.locked() && tryListenerLocker.locked()) {
+ // For an offline audio context, the HRTF database must be loaded before proceeding when the panning model is HRTF.
+ if (m_panningModel == Panner::PanningModelHRTF && !m_hrtfDatabaseLoader->isLoaded()) {
+ if (context()->isOfflineContext()) {
+ m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
+ } else {
+ destination->zero();
+ return;
+ }
+ }
+
// Apply the panning effect.
double azimuth;
double elevation;
- getAzimuthElevation(&azimuth, &elevation);
+ azimuthElevation(&azimuth, &elevation);
+
m_panner->pan(azimuth, elevation, source, destination, framesToProcess);
// Get the distance and cone gain.
- double totalGain = distanceConeGain();
+ float totalGain = distanceConeGain();
// Snap to desired gain at the beginning.
if (m_lastGain == -1.0)
@@ -127,24 +151,19 @@ void PannerNode::process(size_t framesToProcess)
// Apply gain in-place with de-zippering.
destination->copyWithGainFrom(*destination, &m_lastGain, totalGain);
} else {
- // Too bad - The tryLock() failed. We must be in the middle of changing the panner.
+ // Too bad - The tryLock() failed.
+ // We must be in the middle of changing the properties of the panner or the listener.
destination->zero();
}
}
-void PannerNode::reset()
-{
- m_lastGain = -1.0; // force to snap to initial gain
- if (m_panner.get())
- m_panner->reset();
-}
-
void PannerNode::initialize()
{
if (isInitialized())
return;
- m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader());
+ m_panner = Panner::create(m_panningModel, sampleRate(), m_hrtfDatabaseLoader.get());
+ listener()->addPanner(this);
AudioNode::initialize();
}
@@ -155,6 +174,8 @@ void PannerNode::uninitialize()
return;
m_panner.clear();
+ listener()->removePanner(this);
+
AudioNode::uninitialize();
}
@@ -166,12 +187,10 @@ AudioListener* PannerNode::listener()
String PannerNode::panningModel() const
{
switch (m_panningModel) {
- case EQUALPOWER:
+ case Panner::PanningModelEqualPower:
return "equalpower";
- case HRTF:
+ case Panner::PanningModelHRTF:
return "HRTF";
- case SOUNDFIELD:
- return "soundfield";
default:
ASSERT_NOT_REACHED();
return "HRTF";
@@ -181,34 +200,26 @@ String PannerNode::panningModel() const
void PannerNode::setPanningModel(const String& model)
{
if (model == "equalpower")
- setPanningModel(EQUALPOWER);
+ setPanningModel(Panner::PanningModelEqualPower);
else if (model == "HRTF")
- setPanningModel(HRTF);
- else if (model == "soundfield")
- setPanningModel(SOUNDFIELD);
- else
- ASSERT_NOT_REACHED();
+ setPanningModel(Panner::PanningModelHRTF);
}
bool PannerNode::setPanningModel(unsigned model)
{
switch (model) {
- case EQUALPOWER:
- case HRTF:
+ case Panner::PanningModelEqualPower:
+ case Panner::PanningModelHRTF:
if (!m_panner.get() || model != m_panningModel) {
// This synchronizes with process().
- MutexLocker processLocker(m_pannerLock);
-
- OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader());
+ MutexLocker processLocker(m_processLock);
+ OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), m_hrtfDatabaseLoader.get());
m_panner = newPanner.release();
m_panningModel = model;
}
break;
- case SOUNDFIELD:
- // FIXME: Implement sound field model. See https://bugs.webkit.org/show_bug.cgi?id=77367.
- context()->executionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented.");
- break;
default:
+ ASSERT_NOT_REACHED();
return false;
}
@@ -238,8 +249,6 @@ void PannerNode::setDistanceModel(const String& model)
setDistanceModel(DistanceEffect::ModelInverse);
else if (model == "exponential")
setDistanceModel(DistanceEffect::ModelExponential);
- else
- ASSERT_NOT_REACHED();
}
bool PannerNode::setDistanceModel(unsigned model)
@@ -248,32 +257,135 @@ bool PannerNode::setDistanceModel(unsigned model)
case DistanceEffect::ModelLinear:
case DistanceEffect::ModelInverse:
case DistanceEffect::ModelExponential:
- m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true);
+ if (model != m_distanceModel) {
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true);
+ m_distanceModel = model;
+ }
break;
default:
+ ASSERT_NOT_REACHED();
return false;
}
return true;
}
-void PannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
+void PannerNode::setRefDistance(double distance)
+{
+ if (refDistance() == distance)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_distanceEffect.setRefDistance(distance);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setMaxDistance(double distance)
+{
+ if (maxDistance() == distance)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_distanceEffect.setMaxDistance(distance);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setRolloffFactor(double factor)
+{
+ if (rolloffFactor() == factor)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_distanceEffect.setRolloffFactor(factor);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setConeInnerAngle(double angle)
+{
+ if (coneInnerAngle() == angle)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_coneEffect.setInnerAngle(angle);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setConeOuterAngle(double angle)
{
- // FIXME: we should cache azimuth and elevation (if possible), so we only re-calculate if a change has been made.
+ if (coneOuterAngle() == angle)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_coneEffect.setOuterAngle(angle);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setConeOuterGain(double angle)
+{
+ if (coneOuterGain() == angle)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_coneEffect.setOuterGain(angle);
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setPosition(float x, float y, float z)
+{
+ FloatPoint3D position = FloatPoint3D(x, y, z);
+
+ if (m_position == position)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_position = position;
+ markPannerAsDirty(PannerNode::AzimuthElevationDirty | PannerNode::DistanceConeGainDirty | PannerNode::DopplerRateDirty);
+}
+
+void PannerNode::setOrientation(float x, float y, float z)
+{
+ FloatPoint3D orientation = FloatPoint3D(x, y, z);
+
+ if (m_orientation == orientation)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_orientation = orientation;
+ markPannerAsDirty(PannerNode::DistanceConeGainDirty);
+}
+
+void PannerNode::setVelocity(float x, float y, float z)
+{
+ FloatPoint3D velocity = FloatPoint3D(x, y, z);
+
+ if (m_velocity == velocity)
+ return;
+
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processLock);
+ m_velocity = velocity;
+ markPannerAsDirty(PannerNode::DopplerRateDirty);
+}
+
+void PannerNode::calculateAzimuthElevation(double* outAzimuth, double* outElevation)
+{
double azimuth = 0.0;
// Calculate the source-listener vector
FloatPoint3D listenerPosition = listener()->position();
FloatPoint3D sourceListener = m_position - listenerPosition;
- if (sourceListener.isZero()) {
- // degenerate case if source and listener are at the same point
- *outAzimuth = 0.0;
- *outElevation = 0.0;
- return;
- }
-
+ // normalize() does nothing if the length of |sourceListener| is zero.
sourceListener.normalize();
// Align axes
@@ -321,11 +433,9 @@ void PannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
*outElevation = elevation;
}
-float PannerNode::dopplerRate()
+double PannerNode::calculateDopplerRate()
{
double dopplerShift = 1.0;
-
- // FIXME: optimize for case when neither source nor listener has changed...
double dopplerFactor = listener()->dopplerFactor();
if (dopplerFactor > 0.0) {
@@ -345,48 +455,97 @@ float PannerNode::dopplerRate()
double sourceListenerMagnitude = sourceToListener.length();
- double listenerProjection = sourceToListener.dot(listenerVelocity) / sourceListenerMagnitude;
- double sourceProjection = sourceToListener.dot(sourceVelocity) / sourceListenerMagnitude;
+ if (!sourceListenerMagnitude) {
+ // Source and listener are at the same position. Skip the computation of the doppler
+ // shift, and just return the cached value.
+ dopplerShift = m_cachedDopplerRate;
+ } else {
+ double listenerProjection = sourceToListener.dot(listenerVelocity) / sourceListenerMagnitude;
+ double sourceProjection = sourceToListener.dot(sourceVelocity) / sourceListenerMagnitude;
+
+ listenerProjection = -listenerProjection;
+ sourceProjection = -sourceProjection;
+
+ double scaledSpeedOfSound = speedOfSound / dopplerFactor;
+ listenerProjection = min(listenerProjection, scaledSpeedOfSound);
+ sourceProjection = min(sourceProjection, scaledSpeedOfSound);
+
+ dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection));
+ fixNANs(dopplerShift); // avoid illegal values
+
+ // Limit the pitch shifting to 4 octaves up and 3 octaves down.
+ if (dopplerShift > 16.0)
+ dopplerShift = 16.0;
+ else if (dopplerShift < 0.125)
+ dopplerShift = 0.125;
+ }
+ }
+ }
- listenerProjection = -listenerProjection;
- sourceProjection = -sourceProjection;
+ return dopplerShift;
+}
- double scaledSpeedOfSound = speedOfSound / dopplerFactor;
- listenerProjection = min(listenerProjection, scaledSpeedOfSound);
- sourceProjection = min(sourceProjection, scaledSpeedOfSound);
+float PannerNode::calculateDistanceConeGain()
+{
+ FloatPoint3D listenerPosition = listener()->position();
- dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection));
- fixNANs(dopplerShift); // avoid illegal values
+ double listenerDistance = m_position.distanceTo(listenerPosition);
+ double distanceGain = m_distanceEffect.gain(listenerDistance);
+ double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition);
- // Limit the pitch shifting to 4 octaves up and 3 octaves down.
- if (dopplerShift > 16.0)
- dopplerShift = 16.0;
- else if (dopplerShift < 0.125)
- dopplerShift = 0.125;
- }
+ return float(distanceGain * coneGain);
+}
+
+void PannerNode::azimuthElevation(double* outAzimuth, double* outElevation)
+{
+ ASSERT(context()->isAudioThread());
+
+ if (isAzimuthElevationDirty()) {
+ calculateAzimuthElevation(&m_cachedAzimuth, &m_cachedElevation);
+ m_isAzimuthElevationDirty = false;
}
- return static_cast<float>(dopplerShift);
+ *outAzimuth = m_cachedAzimuth;
+ *outElevation = m_cachedElevation;
+}
+
+double PannerNode::dopplerRate()
+{
+ ASSERT(context()->isAudioThread());
+
+ if (isDopplerRateDirty()) {
+ m_cachedDopplerRate = calculateDopplerRate();
+ m_isDopplerRateDirty = false;
+ }
+
+ return m_cachedDopplerRate;
}
float PannerNode::distanceConeGain()
{
- FloatPoint3D listenerPosition = listener()->position();
+ ASSERT(context()->isAudioThread());
- double listenerDistance = m_position.distanceTo(listenerPosition);
- double distanceGain = m_distanceEffect.gain(listenerDistance);
+ if (isDistanceConeGainDirty()) {
+ m_cachedDistanceConeGain = calculateDistanceConeGain();
+ m_isDistanceConeGainDirty = false;
+ }
- m_distanceGain->setValue(static_cast<float>(distanceGain));
+ return m_cachedDistanceConeGain;
+}
- // FIXME: could optimize by caching coneGain
- double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition);
+void PannerNode::markPannerAsDirty(unsigned dirty)
+{
+ if (dirty & PannerNode::AzimuthElevationDirty)
+ m_isAzimuthElevationDirty = true;
- m_coneGain->setValue(static_cast<float>(coneGain));
+ if (dirty & PannerNode::DistanceConeGainDirty)
+ m_isDistanceConeGainDirty = true;
- return float(distanceGain * coneGain);
+ if (dirty & PannerNode::DopplerRateDirty)
+ m_isDopplerRateDirty = true;
}
-void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
+void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node, HashMap<AudioNode*, bool>& visitedNodes)
{
ASSERT(node);
if (!node)
@@ -405,7 +564,14 @@ void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
AudioNodeOutput* connectedOutput = input->renderingOutput(j);
AudioNode* connectedNode = connectedOutput->node();
- notifyAudioSourcesConnectedToNode(connectedNode); // recurse
+ HashMap<AudioNode*, bool>::iterator iterator = visitedNodes.find(connectedNode);
+
+ // If we've seen this node already, we don't need to process it again. Otherwise,
+ // mark it as visited and recurse through the node looking for sources.
+ if (iterator == visitedNodes.end()) {
+ visitedNodes.set(connectedNode, true);
+ notifyAudioSourcesConnectedToNode(connectedNode, visitedNodes); // recurse
+ }
}
}
}
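
The visitedNodes map above turns the doppler-notification walk into a standard visited-set traversal, so graphs containing feedback loops terminate instead of recursing forever (crbug.com/331446). The same pattern in isolation, with a hypothetical Node type:

    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    // Depth-first walk over a possibly cyclic graph; each node is entered once.
    static void visitSources(Node* node, std::unordered_set<Node*>& visited)
    {
        for (Node* input : node->inputs) {
            if (visited.insert(input).second) // insert() reports whether it was new
                visitSources(input, visited);
        }
    }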
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.h
index 358eccabea2..840982b387f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.h
@@ -28,11 +28,12 @@
#include "platform/audio/AudioBus.h"
#include "platform/audio/Cone.h"
#include "platform/audio/Distance.h"
+#include "platform/audio/HRTFDatabaseLoader.h"
#include "platform/audio/Panner.h"
#include "modules/webaudio/AudioListener.h"
#include "modules/webaudio/AudioNode.h"
-#include "modules/webaudio/AudioParam.h"
#include "platform/geometry/FloatPoint3D.h"
+#include "wtf/HashMap.h"
#include "wtf/OwnPtr.h"
namespace WebCore {
@@ -44,87 +45,64 @@ namespace WebCore {
// A cone effect will attenuate the gain as the orientation moves away from the listener.
// All of these effects follow the OpenAL specification very closely.
-class PannerNode : public AudioNode {
+class PannerNode FINAL : public AudioNode {
public:
- // These must be defined as in the .idl file and must match those in the Panner class.
+ // These enums indicate which of the panner's cached values are dirty.
enum {
- EQUALPOWER = 0,
- HRTF = 1,
- SOUNDFIELD = 2,
+ AzimuthElevationDirty = 0x1,
+ DistanceConeGainDirty = 0x2,
+ DopplerRateDirty = 0x4,
};
- // These must be defined as in the .idl file and must match those
- // in the DistanceEffect class.
- enum {
- LINEAR_DISTANCE = 0,
- INVERSE_DISTANCE = 1,
- EXPONENTIAL_DISTANCE = 2,
- };
-
- static PassRefPtr<PannerNode> create(AudioContext* context, float sampleRate)
+ static PassRefPtrWillBeRawPtr<PannerNode> create(AudioContext* context, float sampleRate)
{
- return adoptRef(new PannerNode(context, sampleRate));
+ return adoptRefWillBeNoop(new PannerNode(context, sampleRate));
}
virtual ~PannerNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void pullInputs(size_t framesToProcess);
- virtual void reset();
- virtual void initialize();
- virtual void uninitialize();
-
- // Listener
- AudioListener* listener();
+ virtual void process(size_t framesToProcess) OVERRIDE;
+ virtual void pullInputs(size_t framesToProcess) OVERRIDE;
+ virtual void initialize() OVERRIDE;
+ virtual void uninitialize() OVERRIDE;
// Panning model
String panningModel() const;
- bool setPanningModel(unsigned); // Returns true on success.
void setPanningModel(const String&);
- // Position
- FloatPoint3D position() const { return m_position; }
- void setPosition(float x, float y, float z) { m_position = FloatPoint3D(x, y, z); }
-
- // Orientation
- FloatPoint3D orientation() const { return m_position; }
- void setOrientation(float x, float y, float z) { m_orientation = FloatPoint3D(x, y, z); }
-
- // Velocity
- FloatPoint3D velocity() const { return m_velocity; }
- void setVelocity(float x, float y, float z) { m_velocity = FloatPoint3D(x, y, z); }
+ // Position, orientation and velocity
+ void setPosition(float x, float y, float z);
+ void setOrientation(float x, float y, float z);
+ void setVelocity(float x, float y, float z);
// Distance parameters
String distanceModel() const;
- bool setDistanceModel(unsigned); // Returns true on success.
void setDistanceModel(const String&);
double refDistance() { return m_distanceEffect.refDistance(); }
- void setRefDistance(double refDistance) { m_distanceEffect.setRefDistance(refDistance); }
+ void setRefDistance(double);
double maxDistance() { return m_distanceEffect.maxDistance(); }
- void setMaxDistance(double maxDistance) { m_distanceEffect.setMaxDistance(maxDistance); }
+ void setMaxDistance(double);
double rolloffFactor() { return m_distanceEffect.rolloffFactor(); }
- void setRolloffFactor(double rolloffFactor) { m_distanceEffect.setRolloffFactor(rolloffFactor); }
+ void setRolloffFactor(double);
// Sound cones - angles in degrees
double coneInnerAngle() const { return m_coneEffect.innerAngle(); }
- void setConeInnerAngle(double angle) { m_coneEffect.setInnerAngle(angle); }
+ void setConeInnerAngle(double);
double coneOuterAngle() const { return m_coneEffect.outerAngle(); }
- void setConeOuterAngle(double angle) { m_coneEffect.setOuterAngle(angle); }
+ void setConeOuterAngle(double);
double coneOuterGain() const { return m_coneEffect.outerGain(); }
- void setConeOuterGain(double angle) { m_coneEffect.setOuterGain(angle); }
+ void setConeOuterGain(double);
- void getAzimuthElevation(double* outAzimuth, double* outElevation);
- float dopplerRate();
+ void markPannerAsDirty(unsigned);
- // Accessors for dynamically calculated gain values.
- AudioParam* distanceGain() { return m_distanceGain.get(); }
- AudioParam* coneGain() { return m_coneGain.get(); }
+ // Must be called on the audio thread; currently invoked only from process() in AudioBufferSourceNode.
+ double dopplerRate();
virtual double tailTime() const OVERRIDE { return m_panner ? m_panner->tailTime() : 0; }
virtual double latencyTime() const OVERRIDE { return m_panner ? m_panner->latencyTime() : 0; }
@@ -132,31 +110,58 @@ public:
private:
PannerNode(AudioContext*, float sampleRate);
- // Returns the combined distance and cone gain attenuation.
+ // AudioContext's listener
+ AudioListener* listener();
+
+ bool setPanningModel(unsigned); // Returns true on success.
+ bool setDistanceModel(unsigned); // Returns true on success.
+
+ void calculateAzimuthElevation(double* outAzimuth, double* outElevation);
+ float calculateDistanceConeGain(); // Returns the combined distance and cone gain attenuation.
+ double calculateDopplerRate();
+
+ void azimuthElevation(double* outAzimuth, double* outElevation);
float distanceConeGain();
+ bool isAzimuthElevationDirty() const { return m_isAzimuthElevationDirty; }
+ bool isDistanceConeGainDirty() const { return m_isDistanceConeGainDirty; }
+ bool isDopplerRateDirty() const { return m_isDopplerRateDirty; }
+
// Notifies any AudioBufferSourceNodes connected to us either directly or indirectly about our existence.
// This is in order to handle the pitch change necessary for the doppler shift.
- void notifyAudioSourcesConnectedToNode(AudioNode*);
+ void notifyAudioSourcesConnectedToNode(AudioNode*, HashMap<AudioNode*, bool>& visitedNodes);
OwnPtr<Panner> m_panner;
unsigned m_panningModel;
+ unsigned m_distanceModel;
+ // Current source location information
FloatPoint3D m_position;
FloatPoint3D m_orientation;
FloatPoint3D m_velocity;
+ bool m_isAzimuthElevationDirty;
+ bool m_isDistanceConeGainDirty;
+ bool m_isDopplerRateDirty;
+
// Gain
- RefPtr<AudioParam> m_distanceGain;
- RefPtr<AudioParam> m_coneGain;
DistanceEffect m_distanceEffect;
ConeEffect m_coneEffect;
float m_lastGain;
+ // Cached values
+ double m_cachedAzimuth;
+ double m_cachedElevation;
+ float m_cachedDistanceConeGain;
+ double m_cachedDopplerRate;
+
+ RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
+
+ // AudioContext's connection count
unsigned m_connectionCount;
- // Synchronize process() and setPanningModel() which can change the panner.
- mutable Mutex m_pannerLock;
+ // Synchronize process() with setting of the panning model, source's location information, listener, distance parameters and sound cones.
+ mutable Mutex m_processLock;
};
} // namespace WebCore
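
The cached azimuth/elevation, distance-cone gain, and Doppler rate introduced here follow a dirty-flag pattern: main-thread setters mark the affected caches dirty under m_processLock, and the audio thread recomputes each value lazily on first use. A minimal sketch of the pattern reduced to a single value, with a hypothetical recompute() standing in for the real calculation:

    // Sketch: lazy recomputation guarded by a dirty flag.
    class CachedGain {
    public:
        void markDirty() { m_dirty = true; } // called when any input changes
        float value()
        {
            if (m_dirty) {
                m_cached = recompute(); // hypothetical expensive computation
                m_dirty = false;
            }
            return m_cached;
        }
    private:
        float recompute();
        float m_cached = 1;
        bool m_dirty = true;
    };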
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.idl
index 455740817ba..cbd1fc8f797 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PannerNode.idl
@@ -22,22 +22,23 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum PanningModelType {
+ "equalpower",
+ "HRTF"
+};
+
+enum DistanceModelType {
+ "linear",
+ "inverse",
+ "exponential"
+};
+
[
NoInterfaceObject,
Conditional=WEB_AUDIO
] interface PannerNode : AudioNode {
- // Panning model
- const unsigned short EQUALPOWER = 0;
- const unsigned short HRTF = 1;
- const unsigned short SOUNDFIELD = 2;
-
- // Distance model
- const unsigned short LINEAR_DISTANCE = 0;
- const unsigned short INVERSE_DISTANCE = 1;
- const unsigned short EXPONENTIAL_DISTANCE = 2;
-
// Default model for stereo is HRTF
- [Custom=Setter] attribute DOMString panningModel;
+ attribute PanningModelType panningModel;
// Uses a 3D cartesian coordinate system
void setPosition(float x, float y, float z);
@@ -45,7 +46,7 @@
void setVelocity(float x, float y, float z);
// Distance model
- [Custom=Setter] attribute DOMString distanceModel;
+ attribute DistanceModelType distanceModel;
attribute double refDistance;
attribute double maxDistance;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.cpp
index 28b7a24dc19..3724e5fce51 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.cpp
@@ -45,43 +45,43 @@ namespace WebCore {
using namespace VectorMath;
-PassRefPtr<PeriodicWave> PeriodicWave::create(float sampleRate, Float32Array* real, Float32Array* imag)
+PassRefPtrWillBeRawPtr<PeriodicWave> PeriodicWave::create(float sampleRate, Float32Array* real, Float32Array* imag)
{
bool isGood = real && imag && real->length() == imag->length();
ASSERT(isGood);
if (isGood) {
- RefPtr<PeriodicWave> periodicWave = adoptRef(new PeriodicWave(sampleRate));
+ RefPtrWillBeRawPtr<PeriodicWave> periodicWave = adoptRefWillBeNoop(new PeriodicWave(sampleRate));
size_t numberOfComponents = real->length();
periodicWave->createBandLimitedTables(real->data(), imag->data(), numberOfComponents);
return periodicWave;
}
- return 0;
+ return nullptr;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSine(float sampleRate)
+PassRefPtrWillBeRawPtr<PeriodicWave> PeriodicWave::createSine(float sampleRate)
{
- RefPtr<PeriodicWave> periodicWave = adoptRef(new PeriodicWave(sampleRate));
+ RefPtrWillBeRawPtr<PeriodicWave> periodicWave = adoptRefWillBeNoop(new PeriodicWave(sampleRate));
periodicWave->generateBasicWaveform(OscillatorNode::SINE);
return periodicWave;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSquare(float sampleRate)
+PassRefPtrWillBeRawPtr<PeriodicWave> PeriodicWave::createSquare(float sampleRate)
{
- RefPtr<PeriodicWave> periodicWave = adoptRef(new PeriodicWave(sampleRate));
+ RefPtrWillBeRawPtr<PeriodicWave> periodicWave = adoptRefWillBeNoop(new PeriodicWave(sampleRate));
periodicWave->generateBasicWaveform(OscillatorNode::SQUARE);
return periodicWave;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSawtooth(float sampleRate)
+PassRefPtrWillBeRawPtr<PeriodicWave> PeriodicWave::createSawtooth(float sampleRate)
{
- RefPtr<PeriodicWave> periodicWave = adoptRef(new PeriodicWave(sampleRate));
+ RefPtrWillBeRawPtr<PeriodicWave> periodicWave = adoptRefWillBeNoop(new PeriodicWave(sampleRate));
periodicWave->generateBasicWaveform(OscillatorNode::SAWTOOTH);
return periodicWave;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createTriangle(float sampleRate)
+PassRefPtrWillBeRawPtr<PeriodicWave> PeriodicWave::createTriangle(float sampleRate)
{
- RefPtr<PeriodicWave> periodicWave = adoptRef(new PeriodicWave(sampleRate));
+ RefPtrWillBeRawPtr<PeriodicWave> periodicWave = adoptRefWillBeNoop(new PeriodicWave(sampleRate));
periodicWave->generateBasicWaveform(OscillatorNode::TRIANGLE);
return periodicWave;
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.h b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.h
index d10b3e2c90f..1e62820f3ff 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.h
@@ -40,15 +40,15 @@
namespace WebCore {
-class PeriodicWave : public ScriptWrappable, public RefCounted<PeriodicWave> {
+class PeriodicWave : public RefCountedWillBeGarbageCollectedFinalized<PeriodicWave>, public ScriptWrappable {
public:
- static PassRefPtr<PeriodicWave> createSine(float sampleRate);
- static PassRefPtr<PeriodicWave> createSquare(float sampleRate);
- static PassRefPtr<PeriodicWave> createSawtooth(float sampleRate);
- static PassRefPtr<PeriodicWave> createTriangle(float sampleRate);
+ static PassRefPtrWillBeRawPtr<PeriodicWave> createSine(float sampleRate);
+ static PassRefPtrWillBeRawPtr<PeriodicWave> createSquare(float sampleRate);
+ static PassRefPtrWillBeRawPtr<PeriodicWave> createSawtooth(float sampleRate);
+ static PassRefPtrWillBeRawPtr<PeriodicWave> createTriangle(float sampleRate);
// Creates an arbitrary periodic wave given the frequency components (Fourier coefficients).
- static PassRefPtr<PeriodicWave> create(float sampleRate, Float32Array* real, Float32Array* imag);
+ static PassRefPtrWillBeRawPtr<PeriodicWave> create(float sampleRate, Float32Array* real, Float32Array* imag);
// Returns pointers to the lower and higher wave data for the pitch range containing
// the given fundamental frequency. These two tables are in adjacent "pitch" ranges
@@ -62,7 +62,8 @@ public:
float rateScale() const { return m_rateScale; }
unsigned periodicWaveSize() const { return m_periodicWaveSize; }
- float sampleRate() const { return m_sampleRate; }
+
+ void trace(Visitor*) { }
private:
explicit PeriodicWave(float sampleRate);
@@ -81,8 +82,6 @@ private:
float m_rateScale;
- unsigned numberOfRanges() const { return m_numberOfRanges; }
-
// Maximum possible number of partials (before culling).
unsigned maxNumberOfPartials() const;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.idl b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.idl
index 40a04b127b0..3dccbc7b665 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/PeriodicWave.idl
@@ -24,6 +24,7 @@
// PeriodicWave represents a periodic audio waveform given by its Fourier coefficients.
[
+ WillBeGarbageCollected,
Conditional=WEB_AUDIO
] interface PeriodicWave {
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.cpp
index d470a267079..b768a243917 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.cpp
@@ -30,7 +30,6 @@
#include "platform/audio/AudioBus.h"
#include "platform/audio/AudioUtilities.h"
-#include "platform/audio/FFTFrame.h"
#include "platform/audio/VectorMath.h"
#include <algorithm>
@@ -67,13 +66,6 @@ RealtimeAnalyser::RealtimeAnalyser()
m_analysisFrame = adoptPtr(new FFTFrame(DefaultFFTSize));
}
-void RealtimeAnalyser::reset()
-{
- m_writeIndex = 0;
- m_inputBuffer.zero();
- m_magnitudeBuffer.zero();
-}
-
bool RealtimeAnalyser::setFftSize(size_t size)
{
ASSERT(isMainThread());
@@ -145,7 +137,7 @@ void applyWindow(float* p, size_t n)
for (unsigned i = 0; i < n; ++i) {
double x = static_cast<double>(i) / static_cast<double>(n);
- double window = a0 - a1 * cos(2 * piDouble * x) + a2 * cos(4 * piDouble * x);
+ double window = a0 - a1 * cos(twoPiDouble * x) + a2 * cos(twoPiDouble * 2.0 * x);
p[i] *= float(window);
}
}
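
applyWindow above applies a three-term cosine (Blackman) window before the FFT to reduce spectral leakage. A self-contained sketch of the same shape; the classic alpha = 0.16 coefficients below are an assumption for illustration:

    #include <cmath>
    #include <cstddef>

    // Blackman window of the a0 - a1*cos(2*pi*x) + a2*cos(4*pi*x) form above.
    void applyBlackmanWindow(float* p, size_t n)
    {
        const double alpha = 0.16;            // assumed; classic Blackman
        const double a0 = 0.5 * (1.0 - alpha); // 0.42
        const double a1 = 0.5;
        const double a2 = 0.5 * alpha;         // 0.08
        const double twoPi = 6.283185307179586;
        for (size_t i = 0; i < n; ++i) {
            double x = static_cast<double>(i) / static_cast<double>(n);
            double w = a0 - a1 * std::cos(twoPi * x) + a2 * std::cos(2.0 * twoPi * x);
            p[i] *= static_cast<float>(w);
        }
    }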
@@ -185,7 +177,7 @@ void RealtimeAnalyser::doFFTAnalysis()
imagP[0] = 0;
    // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
- const double magnitudeScale = 1.0 / DefaultFFTSize;
+ const double magnitudeScale = 1.0 / fftSize;
// A value of 0 does no averaging with the previous result. Larger values produce slower, but smoother changes.
double k = m_smoothingTimeConstant;
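
The smoothing constant k is applied as a per-bin exponential moving average over successive FFT magnitudes. A sketch of that update step, with illustrative names:

    #include <cstddef>

    // k = 0 takes each new magnitude outright; k close to 1 changes slowly.
    void smoothMagnitudes(float* smoothed, const float* current, size_t n, double k)
    {
        for (size_t i = 0; i < n; ++i)
            smoothed[i] = static_cast<float>(k * smoothed[i] + (1.0 - k) * current[i]);
    }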
@@ -264,6 +256,35 @@ void RealtimeAnalyser::getByteFrequencyData(Uint8Array* destinationArray)
}
}
+void RealtimeAnalyser::getFloatTimeDomainData(Float32Array* destinationArray)
+{
+ ASSERT(isMainThread());
+
+ if (!destinationArray)
+ return;
+
+ unsigned fftSize = this->fftSize();
+ size_t len = min(fftSize, destinationArray->length());
+ if (len > 0) {
+ bool isInputBufferGood = m_inputBuffer.size() == InputBufferSize && m_inputBuffer.size() > fftSize;
+ ASSERT(isInputBufferGood);
+ if (!isInputBufferGood)
+ return;
+
+ float* inputBuffer = m_inputBuffer.data();
+ float* destination = destinationArray->data();
+
+ unsigned writeIndex = m_writeIndex;
+
+ for (unsigned i = 0; i < len; ++i) {
+ // Buffer access stays in bounds because the modulo operation wraps the index within InputBufferSize.
+ float value = inputBuffer[(i + writeIndex - fftSize + InputBufferSize) % InputBufferSize];
+
+ destination[i] = value;
+ }
+ }
+}
+
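
The new getFloatTimeDomainData reads the most recent fftSize samples out of a fixed-size ring buffer; the modulo expression keeps the index in range even when the window wraps past the start of the buffer. A standalone sketch of the same read (the terms are reordered so the unsigned arithmetic cannot underflow; the original relies on InputBufferSize being a power of two):

    #include <cstddef>

    // Copy the latest `window` samples ending just before `writeIndex` out of
    // a ring buffer of `capacity` samples. Assumes window < capacity.
    void readLatest(const float* ring, size_t capacity, size_t writeIndex,
                    float* dest, size_t window)
    {
        for (size_t i = 0; i < window; ++i)
            dest[i] = ring[(i + writeIndex + capacity - window) % capacity];
    }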
void RealtimeAnalyser::getByteTimeDomainData(Uint8Array* destinationArray)
{
ASSERT(isMainThread());
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.h b/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.h
index 5ce798f0703..87e7fda037a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/RealtimeAnalyser.h
@@ -26,6 +26,7 @@
#define RealtimeAnalyser_h
#include "platform/audio/AudioArray.h"
+#include "platform/audio/FFTFrame.h"
#include "wtf/Forward.h"
#include "wtf/Noncopyable.h"
#include "wtf/OwnPtr.h"
@@ -33,31 +34,29 @@
namespace WebCore {
class AudioBus;
-class FFTFrame;
class RealtimeAnalyser FINAL {
WTF_MAKE_NONCOPYABLE(RealtimeAnalyser);
public:
RealtimeAnalyser();
- void reset();
-
size_t fftSize() const { return m_fftSize; }
bool setFftSize(size_t);
unsigned frequencyBinCount() const { return m_fftSize / 2; }
- void setMinDecibels(float k) { m_minDecibels = k; }
- float minDecibels() const { return static_cast<float>(m_minDecibels); }
+ void setMinDecibels(double k) { m_minDecibels = k; }
+ double minDecibels() const { return m_minDecibels; }
- void setMaxDecibels(float k) { m_maxDecibels = k; }
- float maxDecibels() const { return static_cast<float>(m_maxDecibels); }
+ void setMaxDecibels(double k) { m_maxDecibels = k; }
+ double maxDecibels() const { return m_maxDecibels; }
- void setSmoothingTimeConstant(float k) { m_smoothingTimeConstant = k; }
- float smoothingTimeConstant() const { return static_cast<float>(m_smoothingTimeConstant); }
+ void setSmoothingTimeConstant(double k) { m_smoothingTimeConstant = k; }
+ double smoothingTimeConstant() const { return m_smoothingTimeConstant; }
void getFloatFrequencyData(Float32Array*);
void getByteFrequencyData(Uint8Array*);
+ void getFloatTimeDomainData(Float32Array*);
void getByteTimeDomainData(Uint8Array*);
// The audio thread writes input data here.
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.cpp
index d3f14c884f2..dbbcfdd595f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.cpp
@@ -56,7 +56,7 @@ static size_t chooseBufferSize()
return bufferSize;
}
-PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
// Check for valid buffer size.
switch (bufferSize) {
@@ -72,19 +72,19 @@ PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* contex
case 16384:
break;
default:
- return 0;
+ return nullptr;
}
if (!numberOfInputChannels && !numberOfOutputChannels)
- return 0;
+ return nullptr;
if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
- return 0;
+ return nullptr;
if (numberOfOutputChannels > AudioContext::maxNumberOfChannels())
- return 0;
+ return nullptr;
- return adoptRef(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+ return adoptRefWillBeNoop(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
}
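
The switch above accepts only power-of-two buffer sizes from 256 to 16384. The same predicate can be written with the usual bit trick; a sketch, not how Blink spells it:

    #include <cstddef>

    // Powers of two in [256, 16384]: exactly the sizes create() accepts.
    bool isValidScriptProcessorBufferSize(size_t n)
    {
        return n >= 256 && n <= 16384 && (n & (n - 1)) == 0;
    }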
ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
@@ -93,7 +93,6 @@ ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate
, m_doubleBufferIndexForEvent(0)
, m_bufferSize(bufferSize)
, m_bufferReadWriteIndex(0)
- , m_isRequestOutstanding(false)
, m_numberOfInputChannels(numberOfInputChannels)
, m_numberOfOutputChannels(numberOfOutputChannels)
, m_internalInputBus(AudioBus::create(numberOfInputChannels, AudioNode::ProcessingSizeInFrames, false))
@@ -128,8 +127,8 @@ void ScriptProcessorNode::initialize()
// Create double buffers on both the input and output sides.
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
for (unsigned i = 0; i < 2; ++i) {
- RefPtr<AudioBuffer> inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : 0;
- RefPtr<AudioBuffer> outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : 0;
+ RefPtrWillBeRawPtr<AudioBuffer> inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : nullptr;
+ RefPtrWillBeRawPtr<AudioBuffer> outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : nullptr;
m_inputBuffers.append(inputBuffer);
m_outputBuffers.append(outputBuffer);
@@ -214,7 +213,9 @@ void ScriptProcessorNode::process(size_t framesToProcess)
if (!m_bufferReadWriteIndex) {
// Avoid building up requests on the main thread to fire process events when they're not being handled.
// This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
- if (m_isRequestOutstanding) {
+ // The audio thread can't block on this lock, so we call tryLock() instead.
+ MutexTryLocker tryLocker(m_processEventLock);
+ if (!tryLocker.locked()) {
// We're late in handling the previous request. The main thread must be very busy.
// The best we can do is clear out the buffer ourself here.
outputBuffer->zero();
@@ -224,7 +225,6 @@ void ScriptProcessorNode::process(size_t framesToProcess)
// Fire the event on the main thread, not this one (which is the realtime audio thread).
m_doubleBufferIndexForEvent = m_doubleBufferIndex;
- m_isRequestOutstanding = true;
callOnMainThread(fireProcessEventDispatch, this);
}
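
This is the canonical shape of a realtime audio callback: it must never block, so it tries the lock and degrades to silence when the main thread still holds it. A minimal sketch of the pattern with std::mutex (Blink's MutexTryLocker wraps the same idea; names below are illustrative):

    #include <cstddef>
    #include <mutex>

    std::mutex g_processEventLock;

    void realtimeProcess(float* output, size_t frames)
    {
        // Never wait on the realtime thread: try the lock, and if the main
        // thread is still handling the previous event, emit silence instead.
        std::unique_lock<std::mutex> lock(g_processEventLock, std::try_to_lock);
        if (!lock.owns_lock()) {
            for (size_t i = 0; i < frames; ++i)
                output[i] = 0.0f;
            return;
        }
        // ... fill `output` and post the next process event to the main thread ...
    }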
@@ -247,7 +247,7 @@ void ScriptProcessorNode::fireProcessEventDispatch(void* userData)
void ScriptProcessorNode::fireProcessEvent()
{
- ASSERT(isMainThread() && m_isRequestOutstanding);
+ ASSERT(isMainThread());
bool isIndexGood = m_doubleBufferIndexForEvent < 2;
ASSERT(isIndexGood);
@@ -262,22 +262,15 @@ void ScriptProcessorNode::fireProcessEvent()
// Avoid firing the event if the document has already gone away.
if (context()->executionContext()) {
- // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
- m_isRequestOutstanding = false;
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processEventLock);
- // Call the JavaScript event handler which will do the audio processing.
- dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
- }
-}
-
-void ScriptProcessorNode::reset()
-{
- m_bufferReadWriteIndex = 0;
- m_doubleBufferIndex = 0;
+ // Calculate the playbackTime based on the buffer size, which is processed each time onaudioprocess is called.
+ // The outputBuffer being passed to JS will be played after exhausting the previous outputBuffer by double-buffering.
+ double playbackTime = (context()->currentSampleFrame() + m_bufferSize) / static_cast<double>(context()->sampleRate());
- for (unsigned i = 0; i < 2; ++i) {
- m_inputBuffers[i]->zero();
- m_outputBuffers[i]->zero();
+ // Call the JavaScript event handler which will do the audio processing.
+ dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
}
}
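
Worked example of the playbackTime formula above, under assumed values: with currentSampleFrame = 88200, bufferSize = 1024 and sampleRate = 44100 Hz, playbackTime = (88200 + 1024) / 44100 ≈ 2.023 s, i.e. the moment this outputBuffer will actually be audible, one buffer of double-buffering latency past the context's current position.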
@@ -291,6 +284,13 @@ double ScriptProcessorNode::latencyTime() const
return std::numeric_limits<double>::infinity();
}
+void ScriptProcessorNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_inputBuffers);
+ visitor->trace(m_outputBuffers);
+ AudioNode::trace(visitor);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.h
index 3e1aa7a5535..edbe7423101 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/ScriptProcessorNode.h
@@ -44,26 +44,26 @@ class AudioProcessingEvent;
// The "onaudioprocess" attribute is an event listener which will get called periodically with an AudioProcessingEvent which has
// AudioBuffers for each input and output.
-class ScriptProcessorNode : public AudioNode {
+class ScriptProcessorNode FINAL : public AudioNode {
public:
// bufferSize must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384.
// This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
// Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
// The value chosen must carefully balance between latency and audio quality.
- static PassRefPtr<ScriptProcessorNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+ static PassRefPtrWillBeRawPtr<ScriptProcessorNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
virtual ~ScriptProcessorNode();
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
- virtual void initialize();
- virtual void uninitialize();
+ virtual void process(size_t framesToProcess) OVERRIDE;
+ virtual void initialize() OVERRIDE;
+ virtual void uninitialize() OVERRIDE;
size_t bufferSize() const { return m_bufferSize; }
DEFINE_ATTRIBUTE_EVENT_LISTENER(audioprocess);
+ void trace(Visitor*);
private:
virtual double tailTime() const OVERRIDE;
@@ -79,17 +79,19 @@ private:
void swapBuffers() { m_doubleBufferIndex = 1 - m_doubleBufferIndex; }
unsigned m_doubleBufferIndex;
unsigned m_doubleBufferIndexForEvent;
- Vector<RefPtr<AudioBuffer> > m_inputBuffers;
- Vector<RefPtr<AudioBuffer> > m_outputBuffers;
+ WillBeHeapVector<RefPtrWillBeMember<AudioBuffer> > m_inputBuffers;
+ WillBeHeapVector<RefPtrWillBeMember<AudioBuffer> > m_outputBuffers;
size_t m_bufferSize;
unsigned m_bufferReadWriteIndex;
- volatile bool m_isRequestOutstanding;
unsigned m_numberOfInputChannels;
unsigned m_numberOfOutputChannels;
RefPtr<AudioBus> m_internalInputBus;
+
+ // Synchronize process() with fireProcessEvent().
+ mutable Mutex m_processEventLock;
};
} // namespace WebCore
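
To make the latency/quality tradeoff in the header comment concrete: each buffer adds bufferSize / sampleRate seconds before onaudioprocess output can be heard. A back-of-the-envelope sketch (the 44100 Hz rate is an assumption for illustration):

    #include <cstddef>

    // 256 frames at 44100 Hz is roughly 5.8 ms per buffer; 16384 frames is
    // roughly 371 ms.
    double bufferLatencySeconds(size_t bufferSize, double sampleRate)
    {
        return static_cast<double>(bufferSize) / sampleRate;
    }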
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.cpp
index 6134abe7571..e2339c43b29 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.cpp
@@ -100,24 +100,30 @@ void WaveShaperDSPKernel::processCurve(const float* source, float* destination,
for (unsigned i = 0; i < framesToProcess; ++i) {
const float input = source[i];
- // Calculate a virtual index based on input -1 -> +1 with 0 being at the center of the curve data.
- // Then linearly interpolate between the two points in the curve.
- double virtualIndex = 0.5 * (input + 1) * curveLength;
- int index1 = static_cast<int>(virtualIndex);
- int index2 = index1 + 1;
- double interpolationFactor = virtualIndex - index1;
-
- // Clip index to the input range of the curve.
- // This takes care of input outside of nominal range -1 -> +1
- index1 = max(index1, 0);
- index1 = min(index1, curveLength - 1);
- index2 = max(index2, 0);
- index2 = min(index2, curveLength - 1);
-
- double value1 = curveData[index1];
- double value2 = curveData[index2];
-
- double output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
+ // Calculate a virtual index based on input -1 -> +1 with -1 being curve[0], +1 being
+ // curve[curveLength - 1], and 0 being at the center of the curve data. Then linearly
+ // interpolate between the two points in the curve.
+ double virtualIndex = 0.5 * (input + 1) * (curveLength - 1);
+ double output;
+
+ if (virtualIndex < 0) {
+ // input < -1, so use curve[0]
+ output = curveData[0];
+ } else if (virtualIndex >= curveLength - 1) {
+ // input >= 1, so use last curve value
+ output = curveData[curveLength - 1];
+ } else {
+ // The general case: -1 <= input < 1 implies 0 <= virtualIndex < curveLength - 1,
+ // so interpolate between the nearest samples on the curve.
+ unsigned index1 = static_cast<unsigned>(virtualIndex);
+ unsigned index2 = index1 + 1;
+ double interpolationFactor = virtualIndex - index1;
+
+ double value1 = curveData[index1];
+ double value2 = curveData[index2];
+
+ output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
+ }
destination[i] = output;
}
}
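
The rewritten processCurve clamps out-of-range inputs to the curve endpoints and interpolates inside; note the scale factor changed from curveLength to curveLength - 1 so that input = +1 lands exactly on the last sample rather than one past it. A self-contained version of the same shaping step (assumes curveLength >= 1):

    #include <cstddef>

    // input in [-1, 1] maps onto curve[0] .. curve[curveLength - 1]; values
    // outside that range clamp to the endpoints.
    float shape(float input, const float* curve, size_t curveLength)
    {
        double virtualIndex = 0.5 * (input + 1.0) * (curveLength - 1);
        if (virtualIndex < 0)
            return curve[0];
        if (virtualIndex >= curveLength - 1)
            return curve[curveLength - 1];
        size_t index1 = static_cast<size_t>(virtualIndex);
        double t = virtualIndex - index1;
        return static_cast<float>((1.0 - t) * curve[index1] + t * curve[index1 + 1]);
    }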
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.h b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.h
index 78d7bfe85c4..54017a18c8f 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperDSPKernel.h
@@ -38,13 +38,13 @@ class WaveShaperProcessor;
// WaveShaperDSPKernel is an AudioDSPKernel and is responsible for non-linear distortion on one channel.
-class WaveShaperDSPKernel : public AudioDSPKernel {
+class WaveShaperDSPKernel FINAL : public AudioDSPKernel {
public:
explicit WaveShaperDSPKernel(WaveShaperProcessor*);
// AudioDSPKernel
- virtual void process(const float* source, float* dest, size_t framesToProcess);
- virtual void reset();
+ virtual void process(const float* source, float* dest, size_t framesToProcess) OVERRIDE;
+ virtual void reset() OVERRIDE;
virtual double tailTime() const OVERRIDE { return 0; }
virtual double latencyTime() const OVERRIDE;
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.cpp b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.cpp
index 3e84aaa304b..0474826663e 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.cpp
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.cpp
@@ -72,12 +72,7 @@ void WaveShaperNode::setOversample(const String& type, ExceptionState& exception
} else if (type == "4x") {
waveShaperProcessor()->setOversample(WaveShaperProcessor::OverSample4x);
} else {
- exceptionState.throwDOMException(
- InvalidStateError,
- ExceptionMessages::failedToSet(
- "oversample",
- "WaveShaperNode",
- "invalid oversample '" + type + "': must be 'none', '2x', or '4x'."));
+ ASSERT_NOT_REACHED();
}
}
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.h b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.h
index 8855630bb4d..159a39fb012 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.h
@@ -34,11 +34,11 @@ namespace WebCore {
class ExceptionState;
-class WaveShaperNode : public AudioBasicProcessorNode {
+class WaveShaperNode FINAL : public AudioBasicProcessorNode {
public:
- static PassRefPtr<WaveShaperNode> create(AudioContext* context)
+ static PassRefPtrWillBeRawPtr<WaveShaperNode> create(AudioContext* context)
{
- return adoptRef(new WaveShaperNode(context));
+ return adoptRefWillBeNoop(new WaveShaperNode(context));
}
// setCurve() is called on the main thread.
@@ -48,8 +48,6 @@ public:
void setOversample(const String& , ExceptionState&);
String oversample() const;
- double latency() const { return latencyTime(); }
-
private:
explicit WaveShaperNode(AudioContext*);
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.idl b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.idl
index e1ee72b6326..e17ad17ee4a 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.idl
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperNode.idl
@@ -22,9 +22,15 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum OverSampleType {
+ "none",
+ "2x",
+ "4x"
+};
+
[
Conditional=WEB_AUDIO
] interface WaveShaperNode : AudioNode {
attribute Float32Array curve;
- [RaisesException=Setter] attribute DOMString oversample;
+ [RaisesException=Setter] attribute OverSampleType oversample;
};
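
Typing oversample as an IDL enum moves validation into the bindings layer: any string other than "none", "2x" or "4x" is rejected before the C++ setter runs, which is why the WaveShaperNode.cpp change above replaces the thrown exception with ASSERT_NOT_REACHED(). A simplified sketch of the mapping the setter performs (not Blink's generated binding code):

    #include <string>

    enum class OverSampleType { None, TwoX, FourX };

    // By the time C++ runs, the bindings guarantee `type` is one of the three
    // IDL enum values.
    OverSampleType toOverSampleType(const std::string& type)
    {
        if (type == "none") return OverSampleType::None;
        if (type == "2x")   return OverSampleType::TwoX;
        return OverSampleType::FourX; // "4x"
    }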
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperProcessor.h b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperProcessor.h
index 79c826496ff..e69f0a616d0 100644
--- a/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperProcessor.h
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WaveShaperProcessor.h
@@ -36,7 +36,7 @@ namespace WebCore {
// WaveShaperProcessor is an AudioDSPKernelProcessor which uses WaveShaperDSPKernel objects to implement non-linear distortion effects.
-class WaveShaperProcessor : public AudioDSPKernelProcessor {
+class WaveShaperProcessor FINAL : public AudioDSPKernelProcessor {
public:
enum OverSampleType {
OverSampleNone,
@@ -48,9 +48,9 @@ public:
virtual ~WaveShaperProcessor();
- virtual PassOwnPtr<AudioDSPKernel> createKernel();
+ virtual PassOwnPtr<AudioDSPKernel> createKernel() OVERRIDE;
- virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
+ virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) OVERRIDE;
void setCurve(Float32Array*);
Float32Array* curve() { return m_curve.get(); }
@@ -63,9 +63,6 @@ private:
RefPtr<Float32Array> m_curve;
OverSampleType m_oversample;
-
- // This synchronizes process() with setCurve().
- mutable Mutex m_processLock;
};
} // namespace WebCore
diff --git a/chromium/third_party/WebKit/Source/modules/webaudio/WindowWebAudio.idl b/chromium/third_party/WebKit/Source/modules/webaudio/WindowWebAudio.idl
new file mode 100644
index 00000000000..75788e12b42
--- /dev/null
+++ b/chromium/third_party/WebKit/Source/modules/webaudio/WindowWebAudio.idl
@@ -0,0 +1,12 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[
+ ImplementedAs=DOMWindowWebAudio,
+] partial interface Window {
+ [Conditional=WEB_AUDIO, RuntimeEnabled=WebAudio] attribute AudioContextConstructor AudioContext;
+ [Conditional=WEB_AUDIO, RuntimeEnabled=WebAudio] attribute OfflineAudioContextConstructor OfflineAudioContext;
+ [Conditional=WEB_AUDIO, RuntimeEnabled=WebAudio] attribute AudioContextConstructor webkitAudioContext;
+ [Conditional=WEB_AUDIO, RuntimeEnabled=WebAudio] attribute OfflineAudioContextConstructor webkitOfflineAudioContext;
+};