Make WebAudio API const-correct.
https://bugs.webkit.org/show_bug.cgi?id=76573

Reviewed by Daniel Bates.

Source/WebCore:

No new tests; no net change in functionality, so covered by existing tests.

The non-const data() accessor was renamed mutableData() to expose const-correctness
bugs at compile time:
* platform/audio/AudioChannel.h:
(WebCore::AudioChannel::mutableData):

The following functions were made const-correct:
* platform/audio/AudioArray.h:
(WebCore::AudioArray::copyToRange):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::createBufferFromRange):
(WebCore::AudioBus::createBySampleRateConverting):
(WebCore::AudioBus::createByMixingToMono):
* platform/audio/FFTConvolver.cpp:
(WebCore::FFTConvolver::process):
* platform/audio/FFTConvolver.h:
* platform/audio/FFTFrame.cpp:
(WebCore::FFTFrame::doPaddedFFT):
(WebCore::FFTFrame::doFFT):
* platform/audio/FFTFrame.h:
* platform/audio/ReverbConvolverStage.cpp:
(WebCore::ReverbConvolverStage::ReverbConvolverStage):
(WebCore::ReverbConvolverStage::process):
* platform/audio/ReverbConvolverStage.h:
* platform/audio/ReverbInputBuffer.cpp:
(WebCore::ReverbInputBuffer::write):
* platform/audio/ReverbInputBuffer.h:
* platform/audio/SincResampler.cpp:
(WebCore::SincResampler::process):
* platform/audio/SincResampler.h:
* platform/audio/ZeroPole.cpp:
(WebCore::ZeroPole::process):
* platform/audio/ZeroPole.h:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::channelByType):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::gain):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::process):
* platform/audio/AudioDSPKernelProcessor.h:
* platform/audio/AudioProcessor.h:
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::process):
* platform/audio/DynamicsCompressor.h:
* platform/audio/DynamicsCompressorKernel.cpp:
(WebCore::DynamicsCompressorKernel::process):
* platform/audio/DynamicsCompressorKernel.h:
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::pan):
* platform/audio/EqualPowerPanner.h:
* platform/audio/HRTFElevation.h:
(WebCore::HRTFElevation::numberOfAzimuths):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::pan):
* platform/audio/HRTFPanner.h:
* platform/audio/Panner.h:
* platform/audio/Reverb.cpp:
(WebCore::Reverb::process):
* platform/audio/Reverb.h:
* platform/audio/ReverbConvolver.cpp:
(WebCore::ReverbConvolver::process):
* platform/audio/ReverbConvolver.h:
* platform/audio/ffmpeg/FFTFrameFFMPEG.cpp:
(WebCore::FFTFrame::doFFT):
* platform/audio/mkl/FFTFrameMKL.cpp:
(WebCore::FFTFrame::doFFT):

The following functions were modified to use the renamed mutableData() accessor:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::processWithGainFromMonoStereo):
(WebCore::AudioBus::copyWithSampleAccurateGainValuesFrom):
* platform/audio/AudioChannel.cpp:
(WebCore::AudioChannel::scale):
(WebCore::AudioChannel::copyFrom):
(WebCore::AudioChannel::copyFromRange):
(WebCore::AudioChannel::sumFrom):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::process):
* platform/audio/AudioResampler.cpp:
(WebCore::AudioResampler::process):
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::process):
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::pan):
* platform/audio/HRTFKernel.cpp:
(WebCore::extractAverageGroupDelay):
(WebCore::HRTFKernel::HRTFKernel):
(WebCore::HRTFKernel::createImpulseResponse):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::pan):
* platform/audio/MultiChannelResampler.cpp:
(WebCore::MultiChannelResampler::process):
* platform/audio/Reverb.cpp:
(WebCore::Reverb::process):
* platform/audio/ReverbConvolver.cpp:
(WebCore::ReverbConvolver::ReverbConvolver):
(WebCore::ReverbConvolver::process):
* platform/audio/mac/AudioFileReaderMac.cpp:
(WebCore::AudioFileReader::createBus):
* platform/audio/mac/FFTFrameMac.cpp:
(WebCore::FFTFrame::doFFT):
* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::process):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
* webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::process):
* webaudio/JavaScriptAudioNode.cpp:
(WebCore::JavaScriptAudioNode::process):
* webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::render):
* webaudio/RealtimeAnalyser.cpp:
(WebCore::RealtimeAnalyser::writeInput):
* webaudio/WaveShaperProcessor.cpp:
(WebCore::WaveShaperProcessor::process):

Source/WebKit/chromium:

The following functions were modified to use the renamed mutableData() accessor:
* src/AudioDestinationChromium.cpp:
(WebCore::AudioDestinationChromium::FIFO::fillBuffer):
(WebCore::AudioDestinationChromium::FIFO::consume):
* src/WebAudioData.cpp:
(WebCore::WebAudioBus::channelData):
* src/WebMediaPlayerClientImpl.cpp:
(WebKit::WebMediaPlayerClientImpl::AudioSourceProviderImpl::provideInput):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@105431 268f45cc-cd09-0410-ab3c-d52691b4dbfc
diff --git a/Source/WebCore/platform/audio/AudioArray.h b/Source/WebCore/platform/audio/AudioArray.h
index a14c950..7a2251b 100644
--- a/Source/WebCore/platform/audio/AudioArray.h
+++ b/Source/WebCore/platform/audio/AudioArray.h
@@ -127,7 +127,7 @@
         memset(this->data() + start, 0, sizeof(T) * (end - start));
     }
 
-    void copyToRange(T* sourceData, unsigned start, unsigned end)
+    void copyToRange(const T* sourceData, unsigned start, unsigned end)
     {
         bool isSafe = (start <= end) && (end <= this->size());
         ASSERT(isSafe);
diff --git a/Source/WebCore/platform/audio/AudioBus.cpp b/Source/WebCore/platform/audio/AudioBus.cpp
index 30489ba..b8107b9 100644
--- a/Source/WebCore/platform/audio/AudioBus.cpp
+++ b/Source/WebCore/platform/audio/AudioBus.cpp
@@ -130,6 +130,11 @@
     return 0;
 }
 
+const AudioChannel* AudioBus::channelByType(unsigned type) const
+{
+    return const_cast<AudioBus*>(this)->channelByType(type);
+}
+
 // Returns true if the channel count and frame-size match.
 bool AudioBus::topologyMatches(const AudioBus& bus) const
 {
@@ -143,7 +148,7 @@
     return true;
 }
 
-PassOwnPtr<AudioBus> AudioBus::createBufferFromRange(AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
+PassOwnPtr<AudioBus> AudioBus::createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
 {
     size_t numberOfSourceFrames = sourceBuffer->length();
     unsigned numberOfChannels = sourceBuffer->numberOfChannels();
@@ -345,8 +350,8 @@
     const float* sourceL = sourceBusSafe.channelByType(ChannelLeft)->data();
     const float* sourceR = numberOfSourceChannels > 1 ? sourceBusSafe.channelByType(ChannelRight)->data() : 0;
 
-    float* destinationL = channelByType(ChannelLeft)->data();
-    float* destinationR = numberOfDestinationChannels > 1 ? channelByType(ChannelRight)->data() : 0;
+    float* destinationL = channelByType(ChannelLeft)->mutableData();
+    float* destinationR = numberOfDestinationChannels > 1 ? channelByType(ChannelRight)->mutableData() : 0;
 
     const float DezipperRate = 0.005f;
     int framesToProcess = length();
@@ -437,7 +442,7 @@
     for (unsigned channelIndex = 0; channelIndex < numberOfChannels(); ++channelIndex) {
         if (sourceBus.numberOfChannels() == numberOfChannels())
             source = sourceBus.channel(channelIndex)->data();
-        float* destination = channel(channelIndex)->data();
+        float* destination = channel(channelIndex)->mutableData();
         vmul(source, 1, gainValues, 1, destination, 1, numberOfGainValues);
     }
 }
@@ -452,7 +457,7 @@
     processWithGainFrom(sourceBus, lastMixGain, targetGain, true);
 }
 
-PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(AudioBus* sourceBus, bool mixToMono, double newSampleRate)
+PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate)
 {
     // sourceBus's sample-rate must be known.
     ASSERT(sourceBus && sourceBus->sampleRate());
@@ -476,7 +481,7 @@
     }
     
     // First, mix to mono (if necessary) then sample-rate convert.
-    AudioBus* resamplerSourceBus;
+    const AudioBus* resamplerSourceBus;
     OwnPtr<AudioBus> mixedMonoBus;
     if (mixToMono) {
         mixedMonoBus = AudioBus::createByMixingToMono(sourceBus);
@@ -497,8 +502,8 @@
 
     // Sample-rate convert each channel.
     for (unsigned i = 0; i < numberOfDestinationChannels; ++i) {
-        float* source = resamplerSourceBus->channel(i)->data();
-        float* destination = destinationBus->channel(i)->data();
+        const float* source = resamplerSourceBus->channel(i)->data();
+        float* destination = destinationBus->channel(i)->mutableData();
 
         SincResampler resampler(sampleRateRatio);
         resampler.process(source, destination, sourceLength);
@@ -508,7 +513,7 @@
     return destinationBus.release();
 }
 
-PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(AudioBus* sourceBus)
+PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(const AudioBus* sourceBus)
 {
     switch (sourceBus->numberOfChannels()) {
     case 1:
@@ -519,9 +524,9 @@
             unsigned n = sourceBus->length();
             OwnPtr<AudioBus> destinationBus(adoptPtr(new AudioBus(1, n)));
 
-            float* sourceL = sourceBus->channel(0)->data();
-            float* sourceR = sourceBus->channel(1)->data();
-            float* destination = destinationBus->channel(0)->data();
+            const float* sourceL = sourceBus->channel(0)->data();
+            const float* sourceR = sourceBus->channel(1)->data();
+            float* destination = destinationBus->channel(0)->mutableData();
         
             // Do the mono mixdown.
             for (unsigned i = 0; i < n; ++i)
diff --git a/Source/WebCore/platform/audio/AudioBus.h b/Source/WebCore/platform/audio/AudioBus.h
index 6370c8b..b3da0f4 100644
--- a/Source/WebCore/platform/audio/AudioBus.h
+++ b/Source/WebCore/platform/audio/AudioBus.h
@@ -71,6 +71,7 @@
     AudioChannel* channel(unsigned channel) { return m_channels[channel].get(); }
     const AudioChannel* channel(unsigned channel) const { return const_cast<AudioBus*>(this)->m_channels[channel].get(); }
     AudioChannel* channelByType(unsigned type);
+    const AudioChannel* channelByType(unsigned type) const;
 
     // Number of sample-frames
     size_t length() const { return m_length; }
@@ -87,24 +88,24 @@
 
     // Creates a new buffer from a range in the source buffer.
     // 0 may be returned if the range does not fit in the sourceBuffer
-    static PassOwnPtr<AudioBus> createBufferFromRange(AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
+    static PassOwnPtr<AudioBus> createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
 
 
     // Creates a new AudioBus by sample-rate converting sourceBus to the newSampleRate.
     // setSampleRate() must have been previously called on sourceBus.
     // Note: sample-rate conversion is already handled in the file-reading code for the mac port, so we don't need this.
-    static PassOwnPtr<AudioBus> createBySampleRateConverting(AudioBus* sourceBus, bool mixToMono, double newSampleRate);
+    static PassOwnPtr<AudioBus> createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate);
 
     // Creates a new AudioBus by mixing all the channels down to mono.
     // If sourceBus is already mono, then the returned AudioBus will simply be a copy.
-    static PassOwnPtr<AudioBus> createByMixingToMono(AudioBus* sourceBus);
+    static PassOwnPtr<AudioBus> createByMixingToMono(const AudioBus* sourceBus);
 
     // Scales all samples by the same amount.
     void scale(float scale);
 
     // Master gain for this bus - used with sumWithGainFrom() below
     void setGain(float gain) { m_busGain = gain; }
-    float gain() { return m_busGain; }
+    float gain() const { return m_busGain; }
 
     void reset() { m_isFirstTime = true; } // for de-zippering
 
diff --git a/Source/WebCore/platform/audio/AudioChannel.cpp b/Source/WebCore/platform/audio/AudioChannel.cpp
index ebbf3bb..3c748e6 100644
--- a/Source/WebCore/platform/audio/AudioChannel.cpp
+++ b/Source/WebCore/platform/audio/AudioChannel.cpp
@@ -43,7 +43,7 @@
 
 void AudioChannel::scale(float scale)
 {
-    vsmul(data(), 1, &scale, data(), 1, length());
+    vsmul(data(), 1, &scale, mutableData(), 1, length());
 }
 
 void AudioChannel::copyFrom(const AudioChannel* sourceChannel)
@@ -53,7 +53,7 @@
     if (!isSafe)
         return;
 
-    memcpy(data(), sourceChannel->data(), sizeof(float) * length());
+    memcpy(mutableData(), sourceChannel->data(), sizeof(float) * length());
 }
 
 void AudioChannel::copyFromRange(const AudioChannel* sourceChannel, unsigned startFrame, unsigned endFrame)
@@ -72,7 +72,7 @@
         return;
 
     const float* source = sourceChannel->data();
-    float* destination = data();
+    float* destination = mutableData();
     memcpy(destination, source + startFrame, sizeof(float) * rangeLength);
 }
 
@@ -83,7 +83,7 @@
     if (!isSafe)
         return;
 
-    vadd(data(), 1, sourceChannel->data(), 1, data(), 1, length());
+    vadd(data(), 1, sourceChannel->data(), 1, mutableData(), 1, length());
 }
 
 float AudioChannel::maxAbsValue() const
diff --git a/Source/WebCore/platform/audio/AudioChannel.h b/Source/WebCore/platform/audio/AudioChannel.h
index 8803e75..24de3f9 100644
--- a/Source/WebCore/platform/audio/AudioChannel.h
+++ b/Source/WebCore/platform/audio/AudioChannel.h
@@ -73,7 +73,7 @@
     size_t length() const { return m_length; }
 
     // Direct access to PCM sample data
-    float* data() { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
+    float* mutableData() { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
     const float* data() const { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
 
     // Zeroes out all sample values in buffer.
diff --git a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
index cf4d2d3..5f9139f 100644
--- a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
+++ b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
@@ -71,7 +71,7 @@
     m_initialized = false;
 }
 
-void AudioDSPKernelProcessor::process(AudioBus* source, AudioBus* destination, size_t framesToProcess)
+void AudioDSPKernelProcessor::process(const AudioBus* source, AudioBus* destination, size_t framesToProcess)
 {
     ASSERT(source && destination);
     if (!source || !destination)
@@ -88,7 +88,7 @@
         return;
         
     for (unsigned i = 0; i < m_kernels.size(); ++i)
-        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->data(), framesToProcess);
+        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->mutableData(), framesToProcess);
 }
 
 // Resets filter state
diff --git a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
index 40b5ab8..7f8f81d 100644
--- a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
+++ b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
@@ -59,7 +59,7 @@
     // AudioProcessor methods
     virtual void initialize();
     virtual void uninitialize();
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess);
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
     virtual void reset();
     virtual void setNumberOfChannels(unsigned numberOfChannels);
 
diff --git a/Source/WebCore/platform/audio/AudioProcessor.h b/Source/WebCore/platform/audio/AudioProcessor.h
index 2d7b60a..469f833 100644
--- a/Source/WebCore/platform/audio/AudioProcessor.h
+++ b/Source/WebCore/platform/audio/AudioProcessor.h
@@ -54,7 +54,7 @@
     virtual void uninitialize() = 0;
 
     // Processes the source to destination bus.  The number of channels must match in source and destination.
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess) = 0;
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) = 0;
 
     // Resets filter state
     virtual void reset() = 0;
diff --git a/Source/WebCore/platform/audio/AudioResampler.cpp b/Source/WebCore/platform/audio/AudioResampler.cpp
index ba5b58e..1a9f81e 100644
--- a/Source/WebCore/platform/audio/AudioResampler.cpp
+++ b/Source/WebCore/platform/audio/AudioResampler.cpp
@@ -103,7 +103,7 @@
     // Now that we have the source data, resample each channel into the destination bus.
     // FIXME: optimize for the common stereo case where it's faster to process both left/right channels in the same inner loop.
     for (unsigned i = 0; i < numberOfChannels; ++i) {
-        float* destination = destinationBus->channel(i)->data();
+        float* destination = destinationBus->channel(i)->mutableData();
         m_kernels[i]->process(destination, framesToProcess);
     }
 }
diff --git a/Source/WebCore/platform/audio/DynamicsCompressor.cpp b/Source/WebCore/platform/audio/DynamicsCompressor.cpp
index c4795f4..a552057 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressor.cpp
+++ b/Source/WebCore/platform/audio/DynamicsCompressor.cpp
@@ -115,10 +115,10 @@
     setEmphasisStageParameters(3, gain, anchorFreq / (filterStageRatio * filterStageRatio * filterStageRatio));
 }
 
-void DynamicsCompressor::process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess)
+void DynamicsCompressor::process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess)
 {
-    float* sourceL = sourceBus->channel(0)->data();
-    float* sourceR;
+    const float* sourceL = sourceBus->channel(0)->data();
+    const float* sourceR;
 
     if (sourceBus->numberOfChannels() > 1)
         sourceR = sourceBus->channel(1)->data();
@@ -127,8 +127,8 @@
 
     ASSERT(destinationBus->numberOfChannels() == 2);
 
-    float* destinationL = destinationBus->channel(0)->data();
-    float* destinationR = destinationBus->channel(1)->data();
+    float* destinationL = destinationBus->channel(0)->mutableData();
+    float* destinationR = destinationBus->channel(1)->mutableData();
 
     float filterStageGain = parameterValue(ParamFilterStageGain);
     float filterStageRatio = parameterValue(ParamFilterStageRatio);
diff --git a/Source/WebCore/platform/audio/DynamicsCompressor.h b/Source/WebCore/platform/audio/DynamicsCompressor.h
index 2152951..e0115ee 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressor.h
+++ b/Source/WebCore/platform/audio/DynamicsCompressor.h
@@ -64,7 +64,7 @@
 
     DynamicsCompressor(bool isStereo, float sampleRate);
 
-    void process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
+    void process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
     void reset();
 
     float parameterValue(unsigned parameterID);
diff --git a/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp b/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
index e9f496a..a7a4c12 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
+++ b/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
@@ -82,9 +82,9 @@
     }
 }
 
-void DynamicsCompressorKernel::process(float* sourceL,
+void DynamicsCompressorKernel::process(const float* sourceL,
                                        float* destinationL,
-                                       float* sourceR, /* stereo-linked */
+                                       const float* sourceR, /* stereo-linked */
                                        float* destinationR,
                                        unsigned framesToProcess,
 
diff --git a/Source/WebCore/platform/audio/DynamicsCompressorKernel.h b/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
index 8e5f709..cf319b3 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
+++ b/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
@@ -38,9 +38,9 @@
     DynamicsCompressorKernel(float sampleRate);
 
     // Performs stereo-linked compression.
-    void process(float *sourceL,
+    void process(const float *sourceL,
                  float *destinationL,
-                 float *sourceR,
+                 const float *sourceR,
                  float *destinationR,
                  unsigned framesToProcess,
 
diff --git a/Source/WebCore/platform/audio/EqualPowerPanner.cpp b/Source/WebCore/platform/audio/EqualPowerPanner.cpp
index 0da7622..a2d2dff 100644
--- a/Source/WebCore/platform/audio/EqualPowerPanner.cpp
+++ b/Source/WebCore/platform/audio/EqualPowerPanner.cpp
@@ -49,7 +49,7 @@
     m_smoothingConstant = AudioUtilities::discreteTimeConstantForSampleRate(SmoothingTimeConstant, sampleRate);
 }
 
-void EqualPowerPanner::pan(double azimuth, double /*elevation*/, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+void EqualPowerPanner::pan(double azimuth, double /*elevation*/, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
 {
     // FIXME: implement stereo sources
     bool isInputSafe = inputBus && inputBus->numberOfChannels() == 1 && framesToProcess <= inputBus->length();
@@ -62,10 +62,10 @@
     if (!isOutputSafe)
         return;
 
-    AudioChannel* channel = inputBus->channel(0);
-    float* sourceP = channel->data();                               
-    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
-    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();
+    const AudioChannel* channel = inputBus->channel(0);
+    const float* sourceP = channel->data();                               
+    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
+    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
 
     if (!sourceP || !destinationL || !destinationR)
         return;
diff --git a/Source/WebCore/platform/audio/EqualPowerPanner.h b/Source/WebCore/platform/audio/EqualPowerPanner.h
index 4f6001d..016cd4a 100644
--- a/Source/WebCore/platform/audio/EqualPowerPanner.h
+++ b/Source/WebCore/platform/audio/EqualPowerPanner.h
@@ -35,7 +35,7 @@
 public:
     EqualPowerPanner(float sampleRate);
 
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
 
     virtual void reset() { m_isFirstRender = true; }
 
diff --git a/Source/WebCore/platform/audio/FFTConvolver.cpp b/Source/WebCore/platform/audio/FFTConvolver.cpp
index 9093433..2321de0 100644
--- a/Source/WebCore/platform/audio/FFTConvolver.cpp
+++ b/Source/WebCore/platform/audio/FFTConvolver.cpp
@@ -47,7 +47,7 @@
 {
 }
 
-void FFTConvolver::process(FFTFrame* fftKernel, float* sourceP, float* destP, size_t framesToProcess)
+void FFTConvolver::process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess)
 {
     // FIXME: make so framesToProcess is not required to fit evenly into fftSize/2
 
diff --git a/Source/WebCore/platform/audio/FFTConvolver.h b/Source/WebCore/platform/audio/FFTConvolver.h
index c1b5002..375bf2c 100644
--- a/Source/WebCore/platform/audio/FFTConvolver.h
+++ b/Source/WebCore/platform/audio/FFTConvolver.h
@@ -46,7 +46,7 @@
     // The input to output latency is equal to fftSize / 2
     //
     // Processing in-place is allowed...
-    void process(FFTFrame* fftKernel, float* sourceP, float* destP, size_t framesToProcess);
+    void process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess);
 
     void reset();
 
diff --git a/Source/WebCore/platform/audio/FFTFrame.cpp b/Source/WebCore/platform/audio/FFTFrame.cpp
index 4f032a2..a1a0a50 100644
--- a/Source/WebCore/platform/audio/FFTFrame.cpp
+++ b/Source/WebCore/platform/audio/FFTFrame.cpp
@@ -43,7 +43,7 @@
 
 namespace WebCore {
 
-void FFTFrame::doPaddedFFT(float* data, size_t dataSize)
+void FFTFrame::doPaddedFFT(const float* data, size_t dataSize)
 {
     // Zero-pad the impulse response
     AudioFloatArray paddedResponse(fftSize()); // zero-initialized
diff --git a/Source/WebCore/platform/audio/FFTFrame.h b/Source/WebCore/platform/audio/FFTFrame.h
index b25d279..042633c 100644
--- a/Source/WebCore/platform/audio/FFTFrame.h
+++ b/Source/WebCore/platform/audio/FFTFrame.h
@@ -73,7 +73,7 @@
 
     static void initialize();
     static void cleanup();
-    void doFFT(float* data);
+    void doFFT(const float* data);
     void doInverseFFT(float* data);
     void multiply(const FFTFrame& frame); // multiplies ourself with frame : effectively operator*=()
 
@@ -88,7 +88,7 @@
     // Interpolates from frame1 -> frame2 as x goes from 0.0 -> 1.0
     static PassOwnPtr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);
 
-    void doPaddedFFT(float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
+    void doPaddedFFT(const float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
     double extractAverageGroupDelay();
     void addConstantGroupDelay(double sampleFrameDelay);
 
diff --git a/Source/WebCore/platform/audio/HRTFElevation.h b/Source/WebCore/platform/audio/HRTFElevation.h
index ccff097..446e66d 100644
--- a/Source/WebCore/platform/audio/HRTFElevation.h
+++ b/Source/WebCore/platform/audio/HRTFElevation.h
@@ -60,7 +60,7 @@
     HRTFKernelList* kernelListR() { return m_kernelListR.get(); }
 
     double elevationAngle() const { return m_elevationAngle; }
-    unsigned numberOfAzimuths() { return NumberOfTotalAzimuths; }
+    unsigned numberOfAzimuths() const { return NumberOfTotalAzimuths; }
     float sampleRate() const { return m_sampleRate; }
     
     // Returns the left and right kernels for the given azimuth index.
diff --git a/Source/WebCore/platform/audio/HRTFKernel.cpp b/Source/WebCore/platform/audio/HRTFKernel.cpp
index c44de93..391f904 100644
--- a/Source/WebCore/platform/audio/HRTFKernel.cpp
+++ b/Source/WebCore/platform/audio/HRTFKernel.cpp
@@ -50,7 +50,7 @@
 {
     ASSERT(channel);
         
-    float* impulseP = channel->data();
+    float* impulseP = channel->mutableData();
     
     bool isSizeGood = channel->length() >= analysisFFTSize;
     ASSERT(isSizeGood);
@@ -78,7 +78,7 @@
     // Determine the leading delay (average group delay) for the response.
     m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);
 
-    float* impulseResponse = channel->data();
+    float* impulseResponse = channel->mutableData();
     size_t responseLength = channel->length();
 
     if (bassBoost) {
@@ -114,7 +114,7 @@
 
     // Add leading delay back in.
     fftFrame.addConstantGroupDelay(m_frameDelay);
-    fftFrame.doInverseFFT(channel->data());
+    fftFrame.doInverseFFT(channel->mutableData());
 
     return channel.release();
 }
diff --git a/Source/WebCore/platform/audio/HRTFPanner.cpp b/Source/WebCore/platform/audio/HRTFPanner.cpp
index 978371c..f09961f 100644
--- a/Source/WebCore/platform/audio/HRTFPanner.cpp
+++ b/Source/WebCore/platform/audio/HRTFPanner.cpp
@@ -111,7 +111,7 @@
     return desiredAzimuthIndex;
 }
 
-void HRTFPanner::pan(double desiredAzimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
 {
     unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;
 
@@ -147,14 +147,14 @@
 
     // Normally, we'll just be dealing with mono sources.
     // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF.
-    AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
-    AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
+    const AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
+    const AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
 
     // Get source and destination pointers.
-    float* sourceL = inputChannelL->data();
-    float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
-    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
-    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();
+    const float* sourceL = inputChannelL->data();
+    const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
+    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
+    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
 
     double azimuthBlend;
     int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);
@@ -207,8 +207,8 @@
             
         // Calculate the source and destination pointers for the current segment.
         unsigned offset = segment * framesPerSegment;
-        float* segmentSourceL = sourceL + offset;
-        float* segmentSourceR = sourceR + offset;
+        const float* segmentSourceL = sourceL + offset;
+        const float* segmentSourceR = sourceR + offset;
         float* segmentDestinationL = destinationL + offset;
         float* segmentDestinationR = destinationR + offset;
 
diff --git a/Source/WebCore/platform/audio/HRTFPanner.h b/Source/WebCore/platform/audio/HRTFPanner.h
index e771ba2..ad6f79a 100644
--- a/Source/WebCore/platform/audio/HRTFPanner.h
+++ b/Source/WebCore/platform/audio/HRTFPanner.h
@@ -37,7 +37,7 @@
     virtual ~HRTFPanner();
 
     // Panner
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
     virtual void reset();
 
     size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
diff --git a/Source/WebCore/platform/audio/MultiChannelResampler.cpp b/Source/WebCore/platform/audio/MultiChannelResampler.cpp
index db51e90..f6c07e7 100644
--- a/Source/WebCore/platform/audio/MultiChannelResampler.cpp
+++ b/Source/WebCore/platform/audio/MultiChannelResampler.cpp
@@ -77,7 +77,7 @@
         // Copy the channel data from what we received from m_multiChannelProvider.
         ASSERT(m_currentChannel <= m_numberOfChannels);
         if (m_currentChannel < m_numberOfChannels) {
-            memcpy(bus->channel(0)->data(), m_multiChannelBus->channel(m_currentChannel)->data(), sizeof(float) * framesToProcess);
+            memcpy(bus->channel(0)->mutableData(), m_multiChannelBus->channel(m_currentChannel)->data(), sizeof(float) * framesToProcess);
             ++m_currentChannel;
         }
     }
@@ -113,7 +113,7 @@
         // However, if it calls provideInput() for the first channel, then it will call it for the remaining
         // channels, since they all buffer in the same way and are processing the same number of frames.
         m_kernels[channelIndex]->process(&channelProvider,
-                                         destination->channel(channelIndex)->data(),
+                                         destination->channel(channelIndex)->mutableData(),
                                          framesToProcess);
     }
 }
diff --git a/Source/WebCore/platform/audio/Panner.h b/Source/WebCore/platform/audio/Panner.h
index 4b72832..d8b8dd0 100644
--- a/Source/WebCore/platform/audio/Panner.h
+++ b/Source/WebCore/platform/audio/Panner.h
@@ -53,7 +53,7 @@
 
     PanningModel panningModel() const { return m_panningModel; }
 
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) = 0;
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) = 0;
 
     virtual void reset() = 0;
 
diff --git a/Source/WebCore/platform/audio/Reverb.cpp b/Source/WebCore/platform/audio/Reverb.cpp
index 341626f..122e21b 100644
--- a/Source/WebCore/platform/audio/Reverb.cpp
+++ b/Source/WebCore/platform/audio/Reverb.cpp
@@ -130,7 +130,7 @@
         m_tempBuffer = adoptPtr(new AudioBus(2, MaxFrameSize));
 }
 
-void Reverb::process(AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
+void Reverb::process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
@@ -148,7 +148,7 @@
     }
 
     AudioChannel* destinationChannelL = destinationBus->channel(0);
-    AudioChannel* sourceChannelL = sourceBus->channel(0);
+    const AudioChannel* sourceChannelL = sourceBus->channel(0);
 
     // Handle input -> output matrixing...
     size_t numInputChannels = sourceBus->numberOfChannels();
@@ -157,7 +157,7 @@
 
     if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
         // 2 -> 2 -> 2
-        AudioChannel* sourceChannelR = sourceBus->channel(1);
+        const AudioChannel* sourceChannelR = sourceBus->channel(1);
         AudioChannel* destinationChannelR = destinationBus->channel(1);
         m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
         m_convolvers[1]->process(sourceChannelR, destinationChannelR, framesToProcess);
@@ -177,13 +177,13 @@
         ASSERT(isCopySafe);
         if (!isCopySafe)
             return;
-        memcpy(destinationChannelR->data(), destinationChannelL->data(), sizeof(float) * framesToProcess);
+        memcpy(destinationChannelR->mutableData(), destinationChannelL->data(), sizeof(float) * framesToProcess);
     } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
         // 1 -> 1 -> 1
         m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
     } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
         // 2 -> 4 -> 2 ("True" stereo)
-        AudioChannel* sourceChannelR = sourceBus->channel(1);
+        const AudioChannel* sourceChannelR = sourceBus->channel(1);
         AudioChannel* destinationChannelR = destinationBus->channel(1);
 
         AudioChannel* tempChannelL = m_tempBuffer->channel(0);
diff --git a/Source/WebCore/platform/audio/Reverb.h b/Source/WebCore/platform/audio/Reverb.h
index f162e0b..779e7bb 100644
--- a/Source/WebCore/platform/audio/Reverb.h
+++ b/Source/WebCore/platform/audio/Reverb.h
@@ -45,7 +45,7 @@
     // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread).
     Reverb(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize);
 
-    void process(AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess);
+    void process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess);
     void reset();
 
     unsigned impulseResponseLength() const { return m_impulseResponseLength; }
diff --git a/Source/WebCore/platform/audio/ReverbConvolver.cpp b/Source/WebCore/platform/audio/ReverbConvolver.cpp
index bbb5508..c611414 100644
--- a/Source/WebCore/platform/audio/ReverbConvolver.cpp
+++ b/Source/WebCore/platform/audio/ReverbConvolver.cpp
@@ -82,7 +82,7 @@
     // Otherwise, assume we're being run from a command-line tool.
     bool hasRealtimeConstraint = useBackgroundThreads;
 
-    float* response = impulseResponse->data();
+    const float* response = impulseResponse->data();
     size_t totalResponseLength = impulseResponse->length();
 
     // Because we're not using direct-convolution in the leading portion, the reverb has an overall latency of half the first-stage FFT size
@@ -175,15 +175,15 @@
     }
 }
 
-void ReverbConvolver::process(AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess)
+void ReverbConvolver::process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess)
 {
     bool isSafe = sourceChannel && destinationChannel && sourceChannel->length() >= framesToProcess && destinationChannel->length() >= framesToProcess;
     ASSERT(isSafe);
     if (!isSafe)
         return;
         
-    float* source = sourceChannel->data();
-    float* destination = destinationChannel->data();
+    const float* source = sourceChannel->data();
+    float* destination = destinationChannel->mutableData();
     bool isDataSafe = source && destination;
     ASSERT(isDataSafe);
     if (!isDataSafe)
diff --git a/Source/WebCore/platform/audio/ReverbConvolver.h b/Source/WebCore/platform/audio/ReverbConvolver.h
index 013b684..370b872 100644
--- a/Source/WebCore/platform/audio/ReverbConvolver.h
+++ b/Source/WebCore/platform/audio/ReverbConvolver.h
@@ -52,7 +52,7 @@
     ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads);
     ~ReverbConvolver();
 
-    void process(AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess);
+    void process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess);
     void reset();
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
diff --git a/Source/WebCore/platform/audio/ReverbConvolverStage.cpp b/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
index f207d19..53bb650 100644
--- a/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
+++ b/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
@@ -43,7 +43,7 @@
 
 using namespace VectorMath;
 
-ReverbConvolverStage::ReverbConvolverStage(float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
+ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                            size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer)
     : m_fftKernel(fftSize)
     , m_accumulationBuffer(accumulationBuffer)
@@ -88,7 +88,7 @@
     process(source, framesToProcess);
 }
 
-void ReverbConvolverStage::process(float* source, size_t framesToProcess)
+void ReverbConvolverStage::process(const float* source, size_t framesToProcess)
 {
     ASSERT(source);
     if (!source)
@@ -96,7 +96,8 @@
     
     // Deal with pre-delay stream : note special handling of zero delay.
 
-    float* preDelayedSource;
+    const float* preDelayedSource;
+    float* preDelayedDestination;
     float* temporaryBuffer;
     bool isTemporaryBufferSafe = false;
     if (m_preDelayLength > 0) {
@@ -108,10 +109,12 @@
 
         isTemporaryBufferSafe = framesToProcess <= m_temporaryBuffer.size();
 
-        preDelayedSource = m_preDelayBuffer.data() + m_preReadWriteIndex;
+        preDelayedDestination = m_preDelayBuffer.data() + m_preReadWriteIndex;
+        preDelayedSource = preDelayedDestination;
         temporaryBuffer = m_temporaryBuffer.data();        
     } else {
         // Zero delay
+        preDelayedDestination = 0;
         preDelayedSource = source;
         temporaryBuffer = m_preDelayBuffer.data();
         
@@ -138,7 +141,7 @@
 
     // Finally copy input to pre-delay.
     if (m_preDelayLength > 0) {
-        memcpy(preDelayedSource, source, sizeof(float) * framesToProcess);
+        memcpy(preDelayedDestination, source, sizeof(float) * framesToProcess);
         m_preReadWriteIndex += framesToProcess;
 
         ASSERT(m_preReadWriteIndex <= m_preDelayLength);
diff --git a/Source/WebCore/platform/audio/ReverbConvolverStage.h b/Source/WebCore/platform/audio/ReverbConvolverStage.h
index fc05a0e..9811bc6 100644
--- a/Source/WebCore/platform/audio/ReverbConvolverStage.h
+++ b/Source/WebCore/platform/audio/ReverbConvolverStage.h
@@ -45,11 +45,11 @@
 public:
     // renderPhase is useful to know so that we can manipulate the pre versus post delay so that stages will perform
     // their heavy work (FFT processing) on different slices to balance the load in a real-time thread.
-    ReverbConvolverStage(float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
+    ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                          size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer);
 
     // WARNING: framesToProcess must be such that it evenly divides the delay buffer size (stage_offset).
-    void process(float* source, size_t framesToProcess);
+    void process(const float* source, size_t framesToProcess);
 
     void processInBackground(ReverbConvolver* convolver, size_t framesToProcess);
 
diff --git a/Source/WebCore/platform/audio/ReverbInputBuffer.cpp b/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
index f270f6f..1be9af8 100644
--- a/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
+++ b/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
@@ -40,7 +40,7 @@
 {
 }
 
-void ReverbInputBuffer::write(float* sourceP, size_t numberOfFrames)
+void ReverbInputBuffer::write(const float* sourceP, size_t numberOfFrames)
 {
     size_t bufferLength = m_buffer.size();
     bool isCopySafe = m_writeIndex + numberOfFrames <= bufferLength;
diff --git a/Source/WebCore/platform/audio/ReverbInputBuffer.h b/Source/WebCore/platform/audio/ReverbInputBuffer.h
index 15a2818..5036575 100644
--- a/Source/WebCore/platform/audio/ReverbInputBuffer.h
+++ b/Source/WebCore/platform/audio/ReverbInputBuffer.h
@@ -41,7 +41,7 @@
     // The realtime audio thread keeps writing samples here.
     // The assumption is that the buffer's length is evenly divisible by numberOfFrames (for nearly all cases this will be fine).
     // FIXME: remove numberOfFrames restriction...
-    void write(float* sourceP, size_t numberOfFrames);
+    void write(const float* sourceP, size_t numberOfFrames);
 
     // Background threads can call this to check if there's anything to read...
     size_t writeIndex() const { return m_writeIndex; }
diff --git a/Source/WebCore/platform/audio/SincResampler.cpp b/Source/WebCore/platform/audio/SincResampler.cpp
index 0e4b849..1ee692c 100644
--- a/Source/WebCore/platform/audio/SincResampler.cpp
+++ b/Source/WebCore/platform/audio/SincResampler.cpp
@@ -135,6 +135,8 @@
     
     // Wrap the provided buffer by an AudioBus for use by the source provider.
     AudioBus bus(1, numberOfSourceFrames, false);
+
+    // FIXME: Find a way to make the following const-correct:
     bus.setChannelMemory(0, buffer, numberOfSourceFrames);
     
     m_sourceProvider->provideInput(&bus, numberOfSourceFrames);
@@ -146,7 +148,7 @@
 
 class BufferSourceProvider : public AudioSourceProvider {
 public:
-    BufferSourceProvider(float* source, size_t numberOfSourceFrames)
+    BufferSourceProvider(const float* source, size_t numberOfSourceFrames)
         : m_source(source)
         , m_sourceFramesAvailable(numberOfSourceFrames)
     {
@@ -159,7 +161,7 @@
         if (!m_source || !bus)
             return;
             
-        float* buffer = bus->channel(0)->data();
+        float* buffer = bus->channel(0)->mutableData();
 
         // Clamp to number of frames available and zero-pad.
         size_t framesToCopy = min(m_sourceFramesAvailable, framesToProcess);
@@ -174,13 +176,13 @@
     }
     
 private:
-    float* m_source;
+    const float* m_source;
     size_t m_sourceFramesAvailable;
 };
 
 } // namespace
 
-void SincResampler::process(float* source, float* destination, unsigned numberOfSourceFrames)
+void SincResampler::process(const float* source, float* destination, unsigned numberOfSourceFrames)
 {
     // Resample an in-memory buffer using an AudioSourceProvider.
     BufferSourceProvider sourceProvider(source, numberOfSourceFrames);
diff --git a/Source/WebCore/platform/audio/SincResampler.h b/Source/WebCore/platform/audio/SincResampler.h
index bbe0c55..04dbf3f 100644
--- a/Source/WebCore/platform/audio/SincResampler.h
+++ b/Source/WebCore/platform/audio/SincResampler.h
@@ -44,7 +44,7 @@
     SincResampler(double scaleFactor, unsigned kernelSize = 32, unsigned numberOfKernelOffsets = 32);
     
     // Processes numberOfSourceFrames from source to produce numberOfSourceFrames / scaleFactor frames in destination.
-    void process(float* source, float* destination, unsigned numberOfSourceFrames);
+    void process(const float* source, float* destination, unsigned numberOfSourceFrames);
 
     // Process with input source callback function for streaming applications.
     void process(AudioSourceProvider*, float* destination, size_t framesToProcess);
@@ -71,7 +71,7 @@
     // Source is copied into this buffer for each processing pass.
     AudioFloatArray m_inputBuffer;
 
-    float* m_source;
+    const float* m_source;
     unsigned m_sourceFramesAvailable;
     
     // m_sourceProvider is used to provide the audio input stream to the resampler.
diff --git a/Source/WebCore/platform/audio/ZeroPole.cpp b/Source/WebCore/platform/audio/ZeroPole.cpp
index 2fa4400..9e6f1b6 100644
--- a/Source/WebCore/platform/audio/ZeroPole.cpp
+++ b/Source/WebCore/platform/audio/ZeroPole.cpp
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-void ZeroPole::process(float *source, float *destination, unsigned framesToProcess)
+void ZeroPole::process(const float *source, float *destination, unsigned framesToProcess)
 {
     float zero = m_zero;
     float pole = m_pole;
diff --git a/Source/WebCore/platform/audio/ZeroPole.h b/Source/WebCore/platform/audio/ZeroPole.h
index 93fd0d6..4cb1d17 100644
--- a/Source/WebCore/platform/audio/ZeroPole.h
+++ b/Source/WebCore/platform/audio/ZeroPole.h
@@ -43,7 +43,7 @@
     {
     }
 
-    void process(float *source, float *destination, unsigned framesToProcess);
+    void process(const float *source, float *destination, unsigned framesToProcess);
 
     // Reset filter state.
     void reset() { m_lastX = 0; m_lastY = 0; }
diff --git a/Source/WebCore/platform/audio/ffmpeg/FFTFrameFFMPEG.cpp b/Source/WebCore/platform/audio/ffmpeg/FFTFrameFFMPEG.cpp
index 9f89324..d4624a7 100644
--- a/Source/WebCore/platform/audio/ffmpeg/FFTFrameFFMPEG.cpp
+++ b/Source/WebCore/platform/audio/ffmpeg/FFTFrameFFMPEG.cpp
@@ -135,7 +135,7 @@
     VectorMath::vsmul(imagP1, 1, &scale, imagP1, 1, halfSize);
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     // Copy since processing is in-place.
     float* p = m_complexData.data();
diff --git a/Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp b/Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
index 9550263..d0ecf1a 100644
--- a/Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
+++ b/Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
@@ -212,7 +212,7 @@
         for (size_t i = 0; i < numberOfChannels; ++i) {
             bufferList->mBuffers[i].mNumberChannels = 1;
             bufferList->mBuffers[i].mDataByteSize = numberOfFrames * sizeof(float);
-            bufferList->mBuffers[i].mData = audioBus->channel(i)->data();
+            bufferList->mBuffers[i].mData = audioBus->channel(i)->mutableData();
         }
     }
 
@@ -224,7 +224,7 @@
 
     if (mixToMono && numberOfChannels == 2) {
         // Mix stereo down to mono
-        float* destL = audioBus->channel(0)->data();
+        float* destL = audioBus->channel(0)->mutableData();
         for (size_t i = 0; i < numberOfFrames; i++)
             destL[i] = 0.5f * (bufferL[i] + bufferR[i]);
     }
diff --git a/Source/WebCore/platform/audio/mac/FFTFrameMac.cpp b/Source/WebCore/platform/audio/mac/FFTFrameMac.cpp
index 6dfbbec..8cef928 100644
--- a/Source/WebCore/platform/audio/mac/FFTFrameMac.cpp
+++ b/Source/WebCore/platform/audio/mac/FFTFrameMac.cpp
@@ -127,7 +127,7 @@
     VectorMath::vsmul(imagP1, 1, &scale, imagP1, 1, halfSize);
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     vDSP_ctoz((DSPComplex*)data, 2, &m_frame, 1, m_FFTSize / 2);
     vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_FORWARD);
diff --git a/Source/WebCore/platform/audio/mkl/FFTFrameMKL.cpp b/Source/WebCore/platform/audio/mkl/FFTFrameMKL.cpp
index 6bf2c1e..0f3a282 100644
--- a/Source/WebCore/platform/audio/mkl/FFTFrameMKL.cpp
+++ b/Source/WebCore/platform/audio/mkl/FFTFrameMKL.cpp
@@ -170,7 +170,7 @@
     }
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     // Compute Forward transform.
     MKL_LONG status = DftiComputeForward(m_handle, data, m_complexData.data());