/*
  ==============================================================================

    This file was auto-generated!

    It contains the basic framework code for a JUCE plugin processor.

  ==============================================================================
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"
#include "SoundfontSynthVoice.h"
#include "SoundfontSynthSound.h"

AudioProcessor* JUCE_CALLTYPE createPluginFilter();

//==============================================================================
JuicySFAudioProcessor::JuicySFAudioProcessor()
    : AudioProcessor (getBusesProperties()),
      lastUIWidth(400),
      lastUIHeight(300),
      soundFontPath(String()),
      fluidSynthModel(*this)
{
    initialiseSynth();
}

JuicySFAudioProcessor::~JuicySFAudioProcessor()
{
    // delete fluidSynthModel;
}

void JuicySFAudioProcessor::initialiseSynth() {
    fluidSynthModel.initialise();

    fluidSynth = fluidSynthModel.getSynth();

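    // (Assumption: each SoundfontSynthVoice forwards its note events to the shared FluidSynth
    // instance, so these JUCE voices mainly drive note dispatch while FluidSynth handles the
    // actual polyphony and rendering.)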
    const int numVoices = 8;

    // Add some voices...
    for (int i = numVoices; --i >= 0;)
        synth.addVoice (new SoundfontSynthVoice(fluidSynthModel.getSynth()));

    // ..and give the synth a sound to play
    synth.addSound (new SoundfontSynthSound());
}

//==============================================================================
const String JuicySFAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool JuicySFAudioProcessor::acceptsMidi() const
{
   #if JucePlugin_WantsMidiInput
    return true;
   #else
    return false;
   #endif
}

bool JuicySFAudioProcessor::producesMidi() const
{
   #if JucePlugin_ProducesMidiOutput
    return true;
   #else
    return false;
   #endif
}

double JuicySFAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

int JuicySFAudioProcessor::getNumPrograms()
{
    return 1;   // NB: some hosts don't cope very well if you tell them there are 0 programs,
                // so this should be at least 1, even if you're not really implementing programs.
}

int JuicySFAudioProcessor::getCurrentProgram()
{
    return 0;
}

void JuicySFAudioProcessor::setCurrentProgram (int index)
{
}

const String JuicySFAudioProcessor::getProgramName (int index)
{
    return {};
}

void JuicySFAudioProcessor::changeProgramName (int index, const String& newName)
{
}

//==============================================================================
void JuicySFAudioProcessor::prepareToPlay (double sampleRate, int /*samplesPerBlock*/)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
    synth.setCurrentPlaybackSampleRate (sampleRate);
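    // (Assumption: FluidSynth's own sample rate is configured inside FluidSynthModel;
    // only the JUCE Synthesiser's playback rate is set here.)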
    keyboardState.reset();

    reset();
}

void JuicySFAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.
    keyboardState.reset();
}

bool JuicySFAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
    // Only mono/stereo is supported, and input and output must share the same layout
    const AudioChannelSet& mainOutput = layouts.getMainOutputChannelSet();
    const AudioChannelSet& mainInput  = layouts.getMainInputChannelSet();

    // input and output layout must either be the same or the input must be disabled altogether
    if (! mainInput.isDisabled() && mainInput != mainOutput)
        return false;

    // do not allow disabling the main buses
    if (mainOutput.isDisabled())
        return false;

    // only allow stereo and mono
    return mainOutput.size() <= 2;
}

AudioProcessor::BusesProperties JuicySFAudioProcessor::getBusesProperties() {
    return BusesProperties().withInput ("Input", AudioChannelSet::stereo(), true)
                            .withOutput ("Output", AudioChannelSet::stereo(), true);
}

void JuicySFAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) {
    jassert (!isUsingDoublePrecision());
    const int numSamples = buffer.getNumSamples();

    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // and now get our synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

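    // Render FluidSynth's audio into the same JUCE buffer. Argument order assumed from the
    // FluidSynth 1.x API: frame count, input-buffer count and pointers (ignored by FluidSynth),
    // then the output channel count and JUCE's write pointers.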
    fluid_synth_process(fluidSynth, numSamples, 1, nullptr, buffer.getNumChannels(), buffer.getArrayOfWritePointers());

    // (see juce_VST3_Wrapper.cpp for the assertion this would trip otherwise)
    // this plugin is !JucePlugin_ProducesMidiOutput, so clear any remaining MIDI messages from our buffer
    midiMessages.clear();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    // for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
    //     buffer.clear (i, 0, numSamples);
}

//==============================================================================
bool JuicySFAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* JuicySFAudioProcessor::createEditor()
{
    return new JuicySFAudioProcessorEditor (*this);
}

//==============================================================================
void JuicySFAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    // You should use this method to store your parameters in the memory block.
    // You could do that either as raw data, or use the XML or ValueTree classes
    // as intermediaries to make it easy to save and load complex data.

    // Create an outer XML element..
    XmlElement xml ("MYPLUGINSETTINGS");

    // add some attributes to it..
    xml.setAttribute ("uiWidth", lastUIWidth);
    xml.setAttribute ("uiHeight", lastUIHeight);
    xml.setAttribute ("soundFontPath", soundFontPath);

    // list<StateChangeSubscriber*>::iterator p;
    // for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
    //     (*p)->getStateInformation(xml);
    // }

    // Store the values of all our parameters, using their param ID as the XML attribute
    for (auto* param : getParameters())
        if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
            xml.setAttribute (p->paramID, p->getValue());

    // then use this helper function to stuff it into the binary blob and return it..
    copyXmlToBinary (xml, destData);
}

void JuicySFAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // You should use this method to restore your parameters from this memory block,
    // whose contents will have been created by the getStateInformation() call.

    // This getXmlFromBinary() helper function retrieves our XML from the binary blob..
    ScopedPointer<XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));
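    // (ScopedPointer deletes the XmlElement when it goes out of scope; later JUCE releases
    // deprecate ScopedPointer in favour of std::unique_ptr<XmlElement>.)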

    if (xmlState != nullptr)
    {
        // make sure that it's actually our type of XML object..
        if (xmlState->hasTagName ("MYPLUGINSETTINGS"))
        {
            // list<StateChangeSubscriber*>::iterator p;
            // for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
            //     (*p)->setStateInformation(xmlState);
            // }

            // ok, now pull out our last window size..
            lastUIWidth  = jmax (xmlState->getIntAttribute ("uiWidth", lastUIWidth), 400);
            lastUIHeight = jmax (xmlState->getIntAttribute ("uiHeight", lastUIHeight), 300);
            soundFontPath = xmlState->getStringAttribute ("soundFontPath", soundFontPath);

            // Now reload our parameters..
            for (auto* param : getParameters())
                if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
                    p->setValue ((float) xmlState->getDoubleAttribute (p->paramID, p->getValue()));
        }
    }
}

//void JuicySFAudioProcessor::subscribeToStateChanges(StateChangeSubscriber* subscriber) {
//    stateChangeSubscribers.push_back(subscriber);
//}
//
//void JuicySFAudioProcessor::unsubscribeFromStateChanges(StateChangeSubscriber* subscriber) {
//    stateChangeSubscribers.remove(subscriber);
//}

// FluidSynth only supports float in its process function, so that's all we can support.
bool JuicySFAudioProcessor::supportsDoublePrecisionProcessing() const {
    return false;
}

FluidSynthModel* JuicySFAudioProcessor::getFluidSynthModel() {
    return &fluidSynthModel;
}

void JuicySFAudioProcessor::setSoundFontPath(const String& value) {
    soundFontPath = value;
}

String& JuicySFAudioProcessor::getSoundFontPath() {
    return soundFontPath;
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new JuicySFAudioProcessor();
}