2018-02-27 08:17:12 +08:00
|
|
|
/*
  ==============================================================================

    This file was auto-generated!

    It contains the basic framework code for a JUCE plugin processor.

  ==============================================================================
*/
|
|
|
|
|
|
|
|
#include "PluginProcessor.h"
#include "PluginEditor.h"
#include "SoundfontSynthVoice.h"
#include "SoundfontSynthSound.h"
#include "ExposesComponents.h"
#include "MidiConstants.h"

#include <memory>
|
2018-02-27 08:25:20 +08:00
|
|
|
|
|
|
|
AudioProcessor* JUCE_CALLTYPE createPluginFilter();
|
2018-02-27 08:17:12 +08:00
|
|
|
|
|
|
|
|
|
|
|
//==============================================================================
|
2018-02-27 08:39:50 +08:00
|
|
|
// Constructs the processor with its default window size, no soundfont loaded,
// and bank/preset sentinels of -1 (meaning "not yet chosen").
JuicySFAudioProcessor::JuicySFAudioProcessor()
    : AudioProcessor (getBusesProperties()),
      lastUIWidth (400),
      lastUIHeight (300),
      soundFontPath (String()),
      lastPreset (-1),
      lastBank (-1),
      fluidSynthModel (*this)
{
    initialiseSynth();
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Destructor. fluidSynthModel is a value member, so it is torn down
// automatically; nothing needs to be released manually here.
JuicySFAudioProcessor::~JuicySFAudioProcessor()
{
    // delete fluidSynthModel;
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
void JuicySFAudioProcessor::initialiseSynth() {
|
2018-04-10 08:20:23 +08:00
|
|
|
fluidSynthModel.initialise();
|
2018-02-27 08:25:20 +08:00
|
|
|
|
|
|
|
fluidSynth = fluidSynthModel.getSynth();
|
|
|
|
|
|
|
|
const int numVoices = 8;
|
|
|
|
|
|
|
|
// Add some voices...
|
|
|
|
for (int i = numVoices; --i >= 0;)
|
|
|
|
synth.addVoice (new SoundfontSynthVoice(fluidSynthModel.getSynth()));
|
|
|
|
|
|
|
|
// ..and give the synth a sound to play
|
|
|
|
synth.addSound (new SoundfontSynthSound());
|
2018-02-27 08:17:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
//==============================================================================
|
2018-02-27 08:39:50 +08:00
|
|
|
// Returns the plugin's display name, as configured in the JUCE project settings.
const String JuicySFAudioProcessor::getName() const
{
    return JucePlugin_Name;
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Whether this plugin wants a MIDI input — decided at compile time by the
// project's JucePlugin_WantsMidiInput flag.
bool JuicySFAudioProcessor::acceptsMidi() const
{
   #if JucePlugin_WantsMidiInput
    return true;
   #else
    return false;
   #endif
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Whether this plugin produces MIDI output — decided at compile time by the
// project's JucePlugin_ProducesMidiOutput flag.
bool JuicySFAudioProcessor::producesMidi() const
{
   #if JucePlugin_ProducesMidiOutput
    return true;
   #else
    return false;
   #endif
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// No release tail is reported to the host.
double JuicySFAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Host-visible program (preset) count. Programs are not implemented here.
int JuicySFAudioProcessor::getNumPrograms()
{
    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
    // so this should be at least 1, even if you're not really implementing programs.
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Always program 0 — programs are not implemented (see getNumPrograms()).
int JuicySFAudioProcessor::getCurrentProgram()
{
    return 0;
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Intentionally a no-op: programs are not implemented.
void JuicySFAudioProcessor::setCurrentProgram (int index)
{
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Programs are not implemented, so every program name is empty.
const String JuicySFAudioProcessor::getProgramName (int index)
{
    return {};
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Intentionally a no-op: programs are not implemented.
void JuicySFAudioProcessor::changeProgramName (int index, const String& newName)
{
}
|
|
|
|
|
|
|
|
//==============================================================================
|
2018-02-27 08:39:50 +08:00
|
|
|
void JuicySFAudioProcessor::prepareToPlay (double sampleRate, int /*samplesPerBlock*/)
|
2018-02-27 08:17:12 +08:00
|
|
|
{
|
|
|
|
// Use this method as the place to do any pre-playback
|
|
|
|
// initialisation that you need..
|
2018-02-27 08:25:20 +08:00
|
|
|
synth.setCurrentPlaybackSampleRate (sampleRate);
|
|
|
|
keyboardState.reset();
|
2018-04-16 04:32:26 +08:00
|
|
|
fluidSynthModel.setSampleRate(static_cast<float>(sampleRate));
|
2018-02-27 08:25:20 +08:00
|
|
|
|
|
|
|
reset();
|
2018-02-27 08:17:12 +08:00
|
|
|
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Called when playback stops; an opportunity to free spare resources.
// Here we only need to clear any lingering on-screen-keyboard note state.
void JuicySFAudioProcessor::releaseResources()
{
    keyboardState.reset();
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
bool JuicySFAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
|
2018-02-27 08:17:12 +08:00
|
|
|
{
|
2018-02-27 08:25:20 +08:00
|
|
|
// Only mono/stereo and input/output must have same layout
|
|
|
|
const AudioChannelSet& mainOutput = layouts.getMainOutputChannelSet();
|
|
|
|
const AudioChannelSet& mainInput = layouts.getMainInputChannelSet();
|
|
|
|
|
|
|
|
// input and output layout must either be the same or the input must be disabled altogether
|
|
|
|
if (! mainInput.isDisabled() && mainInput != mainOutput)
|
2018-02-27 08:17:12 +08:00
|
|
|
return false;
|
|
|
|
|
2018-02-27 08:25:20 +08:00
|
|
|
// do not allow disabling the main buses
|
|
|
|
if (mainOutput.isDisabled())
|
2018-02-27 08:17:12 +08:00
|
|
|
return false;
|
|
|
|
|
2018-02-27 08:25:20 +08:00
|
|
|
// only allow stereo and mono
|
|
|
|
return mainOutput.size() <= 2;
|
2018-02-27 08:17:12 +08:00
|
|
|
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Bus configuration passed to the AudioProcessor base constructor:
// an output-only plugin with a single stereo bus, enabled by default.
AudioProcessor::BusesProperties JuicySFAudioProcessor::getBusesProperties() {
    return BusesProperties().withOutput ("Output", AudioChannelSet::stereo(), true);
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
void JuicySFAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) {
|
2018-02-27 08:25:20 +08:00
|
|
|
jassert (!isUsingDoublePrecision());
|
|
|
|
const int numSamples = buffer.getNumSamples();
|
|
|
|
|
|
|
|
// Now pass any incoming midi messages to our keyboard state object, and let it
|
|
|
|
// add messages to the buffer if the user is clicking on the on-screen keys
|
|
|
|
keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
|
2019-06-24 01:12:25 +08:00
|
|
|
|
|
|
|
MidiBuffer processedMidi;
|
|
|
|
int time;
|
|
|
|
MidiMessage m;
|
|
|
|
|
|
|
|
// TODO: factor into a MidiCollector
|
|
|
|
for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);) {
|
2019-06-24 06:06:12 +08:00
|
|
|
Logger::outputDebugString ( m.getDescription() );
|
|
|
|
|
|
|
|
// explicitly not handling note_on/off, or pitch_bend, because these are (for better or worse)
|
|
|
|
// responsibilities of SoundfontSynthVoice.
|
|
|
|
// well, by that logic maybe I should move program change onto Voice. but it doesn't feel like a per-voice concern.
|
2019-06-24 01:12:25 +08:00
|
|
|
if (m.isController()) {
|
|
|
|
fluid_midi_event_t *midi_event(new_fluid_midi_event());
|
|
|
|
fluid_midi_event_set_type(midi_event, static_cast<int>(CONTROL_CHANGE));
|
|
|
|
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
|
|
|
|
fluid_midi_event_set_control(midi_event, m.getControllerNumber());
|
|
|
|
fluid_midi_event_set_value(midi_event, m.getControllerValue());
|
|
|
|
fluid_synth_handle_midi_event(fluidSynth, midi_event);
|
2019-06-24 06:06:12 +08:00
|
|
|
delete_fluid_midi_event(midi_event);
|
|
|
|
} else if (m.isProgramChange()) {
|
|
|
|
fluid_midi_event_t *midi_event(new_fluid_midi_event());
|
|
|
|
fluid_midi_event_set_type(midi_event, static_cast<int>(PROGRAM_CHANGE));
|
|
|
|
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
|
|
|
|
fluid_midi_event_set_program(midi_event, m.getProgramChangeNumber());
|
|
|
|
fluid_synth_handle_midi_event(fluidSynth, midi_event);
|
|
|
|
delete_fluid_midi_event(midi_event);
|
|
|
|
} else if (m.isChannelPressure()) {
|
|
|
|
fluid_midi_event_t *midi_event(new_fluid_midi_event());
|
|
|
|
fluid_midi_event_set_type(midi_event, static_cast<int>(CHANNEL_PRESSURE));
|
|
|
|
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
|
|
|
|
fluid_midi_event_set_program(midi_event, m.getChannelPressureValue());
|
|
|
|
fluid_synth_handle_midi_event(fluidSynth, midi_event);
|
|
|
|
delete_fluid_midi_event(midi_event);
|
|
|
|
} else if (m.isResetAllControllers()) {
|
|
|
|
fluid_midi_event_t *midi_event(new_fluid_midi_event());
|
|
|
|
fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSTEM_RESET));
|
|
|
|
fluid_synth_handle_midi_event(fluidSynth, midi_event);
|
|
|
|
delete_fluid_midi_event(midi_event);
|
|
|
|
} else if (m.isSysEx()) {
|
|
|
|
fluid_midi_event_t *midi_event(new_fluid_midi_event());
|
|
|
|
fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSEX));
|
|
|
|
// I assume that the MidiMessage's sysex buffer would be freed anyway when MidiMessage is destroyed, so set dynamic=false
|
|
|
|
// to ensure that fluidsynth does not attempt to free the sysex buffer during delete_fluid_midi_event()
|
|
|
|
fluid_midi_event_set_sysex(midi_event, const_cast<juce::uint8*>(m.getSysExData()), m.getSysExDataSize(), static_cast<int>(false));
|
|
|
|
fluid_synth_handle_midi_event(fluidSynth, midi_event);
|
2019-06-24 01:12:25 +08:00
|
|
|
delete_fluid_midi_event(midi_event);
|
|
|
|
}
|
|
|
|
}
|
2018-02-27 08:25:20 +08:00
|
|
|
|
|
|
|
// and now get our synth to process these midi events and generate its output.
|
|
|
|
synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
|
2019-06-23 06:24:16 +08:00
|
|
|
fluid_synth_process(fluidSynth, numSamples, 0, nullptr, buffer.getNumChannels(), buffer.getArrayOfWritePointers());
|
2018-02-27 08:17:12 +08:00
|
|
|
|
2018-02-28 07:33:19 +08:00
|
|
|
// (see juce_VST3_Wrapper.cpp for the assertion this would trip otherwise)
|
|
|
|
// we are !JucePlugin_ProducesMidiOutput, so clear remaining MIDI messages from our buffer
|
|
|
|
midiMessages.clear();
|
|
|
|
|
2018-02-27 08:17:12 +08:00
|
|
|
// In case we have more outputs than inputs, this code clears any output
|
|
|
|
// channels that didn't contain input data, (because these aren't
|
|
|
|
// guaranteed to be empty - they may contain garbage).
|
|
|
|
// This is here to avoid people getting screaming feedback
|
|
|
|
// when they first compile a plugin, but obviously you don't need to keep
|
|
|
|
// this code if your algorithm always overwrites all the output channels.
|
2018-02-27 08:25:20 +08:00
|
|
|
// for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
|
|
|
|
// buffer.clear (i, 0, numSamples);
|
2018-02-27 08:17:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
//==============================================================================
|
2018-02-27 08:39:50 +08:00
|
|
|
// This plugin supplies a custom editor (JuicySFAudioProcessorEditor).
bool JuicySFAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Creates the plugin's UI. The host takes ownership of the returned editor
// and destroys it when the window is closed.
AudioProcessorEditor* JuicySFAudioProcessor::createEditor()
{
    return new JuicySFAudioProcessorEditor (*this);
}
|
|
|
|
|
|
|
|
//==============================================================================
|
2018-02-27 08:39:50 +08:00
|
|
|
// Serialises plugin state — window size, soundfont path, bank/preset and all
// parameter values — as XML, then packs it into the host-provided blob.
// Mirrors setStateInformation(), which reads the same attributes back.
void JuicySFAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // Outer XML element holding all of our settings.
    XmlElement xml ("MYPLUGINSETTINGS");

    xml.setAttribute ("uiWidth", lastUIWidth);
    xml.setAttribute ("uiHeight", lastUIHeight);
    xml.setAttribute ("soundFontPath", soundFontPath);
    xml.setAttribute ("preset", lastPreset);
    xml.setAttribute ("bank", lastBank);

    // Persist every parameter's value under its param ID.
    for (auto* param : getParameters())
        if (auto* withId = dynamic_cast<AudioProcessorParameterWithID*> (param))
            xml.setAttribute (withId->paramID, withId->getValue());

    // Stuff the XML into the binary blob the host will store.
    copyXmlToBinary (xml, destData);
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
void JuicySFAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
|
2018-02-27 08:17:12 +08:00
|
|
|
{
|
|
|
|
// You should use this method to restore your parameters from this memory block,
|
|
|
|
// whose contents will have been created by the getStateInformation() call.
|
2018-02-27 08:25:20 +08:00
|
|
|
// This getXmlFromBinary() helper function retrieves our XML from the binary blob..
|
|
|
|
ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));
|
|
|
|
|
|
|
|
if (xmlState != nullptr)
|
|
|
|
{
|
|
|
|
// make sure that it's actually our type of XML object..
|
|
|
|
if (xmlState->hasTagName ("MYPLUGINSETTINGS"))
|
|
|
|
{
|
2018-04-10 07:11:22 +08:00
|
|
|
// list<StateChangeSubscriber*>::iterator p;
|
|
|
|
// for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
|
|
|
|
// (*p)->setStateInformation(xmlState);
|
|
|
|
// }
|
|
|
|
|
|
|
|
// ok, now pull out our last window size..
|
|
|
|
lastUIWidth = jmax (xmlState->getIntAttribute ("uiWidth", lastUIWidth), 400);
|
|
|
|
lastUIHeight = jmax (xmlState->getIntAttribute ("uiHeight", lastUIHeight), 300);
|
2018-04-10 07:51:21 +08:00
|
|
|
soundFontPath = xmlState->getStringAttribute ("soundFontPath", soundFontPath);
|
2018-04-13 07:40:27 +08:00
|
|
|
lastPreset = xmlState->getIntAttribute ("preset", lastPreset);
|
|
|
|
lastBank = xmlState->getIntAttribute ("bank", lastBank);
|
2018-02-27 08:25:20 +08:00
|
|
|
|
|
|
|
// Now reload our parameters..
|
|
|
|
for (auto* param : getParameters())
|
|
|
|
if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
|
|
|
|
p->setValue ((float) xmlState->getDoubleAttribute (p->paramID, p->getValue()));
|
2018-04-11 06:29:32 +08:00
|
|
|
|
2018-04-13 08:14:07 +08:00
|
|
|
fluidSynthModel.onFileNameChanged(soundFontPath, lastBank, lastPreset);
|
2018-04-11 06:29:32 +08:00
|
|
|
|
|
|
|
AudioProcessorEditor* editor = getActiveEditor();
|
|
|
|
if (editor != nullptr) {
|
|
|
|
editor->setSize(lastUIWidth, lastUIHeight);
|
2018-04-11 07:52:44 +08:00
|
|
|
|
|
|
|
jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
|
|
|
|
ExposesComponents* exposesComponents = dynamic_cast<ExposesComponents*> (editor);
|
|
|
|
exposesComponents->getFilePicker().setDisplayedFilePath(soundFontPath);
|
2018-04-11 06:29:32 +08:00
|
|
|
}
|
2018-04-11 07:08:15 +08:00
|
|
|
|
|
|
|
// const String& currentSoundFontAbsPath = fluidSynthModel->getCurrentSoundFontAbsPath();
|
|
|
|
// if (currentSoundFontAbsPath.isNotEmpty()) {
|
|
|
|
// fileChooser.setCurrentFile(File(currentSoundFontAbsPath), true, dontSendNotification);
|
|
|
|
// }
|
2018-02-27 08:25:20 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-10 07:11:22 +08:00
|
|
|
//void JuicySFAudioProcessor::subscribeToStateChanges(StateChangeSubscriber* subscriber) {
|
|
|
|
// stateChangeSubscribers.push_back(subscriber);
|
|
|
|
//}
|
|
|
|
//
|
|
|
|
//void JuicySFAudioProcessor::unsubscribeFromStateChanges(StateChangeSubscriber* subscriber) {
|
|
|
|
// stateChangeSubscribers.remove(subscriber);
|
|
|
|
//}
|
2018-03-19 07:35:29 +08:00
|
|
|
|
2018-02-27 08:25:20 +08:00
|
|
|
// FluidSynth only supports float in its process function, so that's all we can support.
|
2018-02-27 08:39:50 +08:00
|
|
|
// FluidSynth only supports float in its process function, so that's all we can support.
bool JuicySFAudioProcessor::supportsDoublePrecisionProcessing() const {
    return false;
}
|
|
|
|
|
2018-02-27 08:39:50 +08:00
|
|
|
// Non-owning pointer to the processor's FluidSynth model; valid for the
// lifetime of this processor (fluidSynthModel is a value member).
FluidSynthModel* JuicySFAudioProcessor::getFluidSynthModel() {
    return &fluidSynthModel;
}
|
|
|
|
|
2018-04-10 08:17:50 +08:00
|
|
|
// Records the path of the currently-loaded soundfont (persisted in
// getStateInformation()). Does not itself trigger a (re)load.
void JuicySFAudioProcessor::setSoundFontPath(const String& value) {
    soundFontPath = value;
}
|
|
|
|
|
|
|
|
// Returns the stored soundfont path.
// NOTE(review): returns a mutable reference to internal state — callers could
// bypass setSoundFontPath(); consider returning const String&.
String& JuicySFAudioProcessor::getSoundFontPath() {
    return soundFontPath;
}
|
2018-04-13 07:40:27 +08:00
|
|
|
// Last-selected preset number; -1 until one has been chosen or restored.
int JuicySFAudioProcessor::getPreset() {
    return lastPreset;
}
|
|
|
|
// Last-selected bank number; -1 until one has been chosen or restored.
int JuicySFAudioProcessor::getBank() {
    return lastBank;
}
|
|
|
|
// Records the selected preset (persisted in getStateInformation()).
void JuicySFAudioProcessor::setPreset(int preset) {
    lastPreset = preset;
}
|
|
|
|
// Records the selected bank (persisted in getStateInformation()).
void JuicySFAudioProcessor::setBank(int bank) {
    lastBank = bank;
}
|
2018-04-10 08:17:50 +08:00
|
|
|
|
2018-02-27 08:17:12 +08:00
|
|
|
//==============================================================================
|
|
|
|
// This creates new instances of the plugin..
|
|
|
|
// This creates new instances of the plugin..
// (Called by the JUCE wrapper code; the host/wrapper owns the returned object.)
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new JuicySFAudioProcessor();
}
|