juicysfplugin/Source/PluginProcessor.cpp

/*
==============================================================================
This file was auto-generated!
It contains the basic framework code for a JUCE plugin processor.
==============================================================================
*/
#include "PluginProcessor.h"
#include "PluginEditor.h"
#include "SoundfontSynthVoice.h"
#include "SoundfontSynthSound.h"
#include "ExposesComponents.h"
#include "MidiConstants.h"
#include "Util.h"
#include "SharesParams.h"
#include "Params.h"
#include "MidiConstants.h"
using namespace std;
using Parameter = AudioProcessorValueTreeState::Parameter;
AudioProcessor* JUCE_CALLTYPE createPluginFilter();
//==============================================================================
JuicySFAudioProcessor::JuicySFAudioProcessor()
: AudioProcessor{getBusesProperties()}
, sharedParams{static_pointer_cast<SharesParams>(make_shared<Params>())}
, valueTreeState{*this, nullptr,
{ "PARAMETERS" /* MYPLUGINSETTINGS */ },
createParameterLayout()
}
, fluidSynthModel{sharedParams}
//, fluidSynthModel{*this}
//, pluginEditor(nullptr)
{
initialiseSynth();
}
AudioProcessorValueTreeState::ParameterLayout JuicySFAudioProcessor::createParameterLayout() {
// std::vector<std::unique_ptr<AudioParameterInt>> params;
// for (int i = 1; i < 9; ++i)
// params.push_back (std::make_unique<AudioParameterInt> (String (i), String (i), 0, i, 0));
// https://stackoverflow.com/a/8469002/5257399
unique_ptr<AudioParameterInt> params[] = {
make_unique<AudioParameterInt>("attack", "volume envelope attack time", 0, 127, 0, "A" ),
make_unique<AudioParameterInt>("decay", "volume envelope sustain attentuation", 0, 127, 0, "D" ),
make_unique<AudioParameterInt>("sustain", "volume envelope decay time", 0, 127, 0, "S" ),
make_unique<AudioParameterInt>("release", "volume envelope release time", 0, 127, 0, "R" ),
make_unique<AudioParameterInt>("filterCutOff", "low-pass filter cut-off frequency", 0, 127, 0, "Cut" ),
make_unique<AudioParameterInt>("filterResonance", "low-pass filter resonance attentuation", 0, 127, 0, "Res" ),
};
return {
make_move_iterator(begin(params)),
make_move_iterator(end(params))
};
}
JuicySFAudioProcessor::~JuicySFAudioProcessor()
{
// delete fluidSynthModel;
}
void JuicySFAudioProcessor::initialiseSynth() {
fluidSynthModel.initialise();
fluidSynth = fluidSynthModel.getSynth();
const int numVoices = 8;
// Add some voices...
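// every voice shares the same underlying fluid_synth_t instance; per-note handling is delegated to fluidsynth from within SoundfontSynthVoice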
for (int i = numVoices; --i >= 0;)
synth.addVoice(new SoundfontSynthVoice(fluidSynthModel.getSynth()));
// ..and give the synth a sound to play
synth.addSound(new SoundfontSynthSound());
}
//==============================================================================
const String JuicySFAudioProcessor::getName() const
{
return JucePlugin_Name;
}
bool JuicySFAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
return true;
#else
return false;
#endif
}
bool JuicySFAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
return true;
#else
return false;
#endif
}
double JuicySFAudioProcessor::getTailLengthSeconds() const
{
return 0.0;
}
int JuicySFAudioProcessor::getNumPrograms()
{
return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
// so this should be at least 1, even if you're not really implementing programs.
}
int JuicySFAudioProcessor::getCurrentProgram()
{
return 0;
}
void JuicySFAudioProcessor::setCurrentProgram (int index)
{
}
const String JuicySFAudioProcessor::getProgramName (int index)
{
return {};
}
void JuicySFAudioProcessor::changeProgramName (int index, const String& newName)
{
}
//==============================================================================
void JuicySFAudioProcessor::prepareToPlay (double sampleRate, int /*samplesPerBlock*/)
{
// Use this method as the place to do any pre-playback
// initialisation that you need..
synth.setCurrentPlaybackSampleRate (sampleRate);
keyboardState.reset();
fluidSynthModel.setSampleRate(static_cast<float>(sampleRate));
reset();
}
void JuicySFAudioProcessor::releaseResources()
{
// When playback stops, you can use this as an opportunity to free up any
// spare memory, etc.
keyboardState.reset();
}
bool JuicySFAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
// Only mono/stereo is supported, and input/output must share the same layout
const AudioChannelSet& mainOutput = layouts.getMainOutputChannelSet();
const AudioChannelSet& mainInput = layouts.getMainInputChannelSet();
// input and output layout must either be the same or the input must be disabled altogether
if (! mainInput.isDisabled() && mainInput != mainOutput)
return false;
// do not allow disabling the main buses
if (mainOutput.isDisabled())
return false;
// only allow stereo and mono
return mainOutput.size() <= 2;
}
AudioProcessor::BusesProperties JuicySFAudioProcessor::getBusesProperties() {
return BusesProperties()
.withOutput ("Output", AudioChannelSet::stereo(), true);
}
void JuicySFAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) {
jassert (!isUsingDoublePrecision());
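// (we report supportsDoublePrecisionProcessing() == false, so the host should only ever hand us float buffers)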
const int numSamples = buffer.getNumSamples();
// Now pass any incoming midi messages to our keyboard state object, and let it
// add messages to the buffer if the user is clicking on the on-screen keys
keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
MidiBuffer processedMidi;
int time;
MidiMessage m;
// TODO: factor into a MidiCollector
for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);) {
DEBUG_PRINT ( m.getDescription() );
// explicitly not handling note_on/off here, because those are (for better or worse)
// responsibilities of SoundfontSynthVoice.
// by that logic maybe program change should move onto the Voice too, but it doesn't feel like a per-voice concern.
if (m.isController()) {
// shared_ptr<fluid_midi_event_t> midi_event{
// new_fluid_midi_event(),
// [](fluid_midi_event_t *event) {
// delete_fluid_midi_event(event);
// }};
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(CONTROL_CHANGE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_control(midi_event, m.getControllerNumber());
fluid_midi_event_set_value(midi_event, m.getControllerValue());
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
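// mirror the sound controllers we also expose as plugin parameters into the value tree,
// so the host-visible parameters (and the GUI sliders bound to them) stay in sync with incoming MIDI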
switch(static_cast<fluid_midi_control_change>(m.getControllerNumber())) {
case SOUND_CTRL2: { // MIDI CC 71 Timbre/Harmonic Intensity (filter resonance)
valueTreeState.state.setProperty({"filterResonance"}, m.getControllerValue(), nullptr);
break;
}
case SOUND_CTRL3: { // MIDI CC 72 Release time
// valueTreeState.state.setProperty({"release"}, m.getControllerValue(), nullptr);
// valueTreeState.state.flushParameterValuesToValueTree();
// jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
RangedAudioParameter *param {valueTreeState.getParameter("release")};
// dynamic_cast<AudioParameterInt&>(*param)
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
// castParam->setValue(m.getControllerValue());
// castParam->
// param->setValue(m.getControllerValue());
// param->setValueNotifyingHost(m.getControllerValue());
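// AudioParameterInt::operator= takes the plain (un-normalised) integer and notifies the host; setValue() would instead expect a normalised 0..1 float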
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL4: { // MIDI CC 73 Attack time
valueTreeState.state.setProperty({"attack"}, m.getControllerValue(), nullptr);
break;
}
case SOUND_CTRL5: { // MIDI CC 74 Brightness (cutoff frequency, FILTERFC)
valueTreeState.state.setProperty({"filterCutOff"}, m.getControllerValue(), nullptr);
break;
}
case SOUND_CTRL6: { // MIDI CC 75 Decay Time
valueTreeState.state.setProperty({"decay"}, m.getControllerValue(), nullptr);
break;
}
case SOUND_CTRL10: { // MIDI CC 79 undefined
valueTreeState.state.setProperty({"sustain"}, m.getControllerValue(), nullptr);
break;
}
default: {
break;
}
}
// sharedParams->acceptMidiControlEvent(m.getControllerNumber(), m.getControllerValue());
// AudioProcessorEditor* editor{getActiveEditor()};
// jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
// ExposesComponents* exposesComponents{dynamic_cast<ExposesComponents*>(editor)};
// exposesComponents->getSliders().acceptMidiControlEvent(m.getControllerNumber(), m.getControllerValue());
} else if (m.isProgramChange()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(PROGRAM_CHANGE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_program(midi_event, m.getProgramChangeNumber());
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isPitchWheel()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(PITCH_BEND));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_pitch(midi_event, m.getPitchWheelValue());
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isChannelPressure()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(CHANNEL_PRESSURE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
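// fluidsynth reads the channel-pressure value from the event's "program" field (param1), which is why set_program() is used here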
fluid_midi_event_set_program(midi_event, m.getChannelPressureValue());
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isAftertouch()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(KEY_PRESSURE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_key(midi_event, m.getNoteNumber());
fluid_midi_event_set_value(midi_event, m.getAfterTouchValue());
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
// } else if (m.isMetaEvent()) {
// fluid_midi_event_t *midi_event(new_fluid_midi_event());
// fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSTEM_RESET));
// fluid_synth_handle_midi_event(fluidSynth, midi_event);
// delete_fluid_midi_event(midi_event);
} else if (m.isSysEx()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSEX));
// I assume that the MidiMessage's sysex buffer would be freed anyway when MidiMessage is destroyed, so set dynamic=false
// to ensure that fluidsynth does not attempt to free the sysex buffer during delete_fluid_midi_event()
fluid_midi_event_set_sysex(midi_event, const_cast<juce::uint8*>(m.getSysExData()), m.getSysExDataSize(), static_cast<int>(false));
fluid_synth_handle_midi_event(fluidSynth, midi_event);
delete_fluid_midi_event(midi_event);
}
}
// int pval;
// 73: 64 attack
// 75: decay
// 79: sustain
// 72: 64 release
// fluid_synth_get_cc(fluidSynth, 0, 73, &pval);
// Logger::outputDebugString ( juce::String::formatted("hey: %d\n", pval) );
// and now get our synth to process these midi events and generate its output.
synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
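// hand the same planar float buffers to fluidsynth so it renders its output there too; 0/nullptr means no separate effects buffers are requested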
fluid_synth_process(fluidSynth, numSamples, 0, nullptr, buffer.getNumChannels(), buffer.getArrayOfWritePointers());
// (see juce_VST3_Wrapper.cpp for the assertion this would trip otherwise)
// we are !JucePlugin_ProducesMidiOutput, so clear remaining MIDI messages from our buffer
midiMessages.clear();
// In case we have more outputs than inputs, this code clears any output
// channels that didn't contain input data, (because these aren't
// guaranteed to be empty - they may contain garbage).
// This is here to avoid people getting screaming feedback
// when they first compile a plugin, but obviously you don't need to keep
// this code if your algorithm always overwrites all the output channels.
// for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
// buffer.clear (i, 0, numSamples);
}
//==============================================================================
bool JuicySFAudioProcessor::hasEditor() const
{
return true; // (change this to false if you choose to not supply an editor)
}
AudioProcessorEditor* JuicySFAudioProcessor::createEditor()
{
// grab a raw pointer to it for our own use
return /*pluginEditor = */new JuicySFAudioProcessorEditor (*this, valueTreeState);
}
//==============================================================================
void JuicySFAudioProcessor::getStateInformation (MemoryBlock& destData)
{
// You should use this method to store your parameters in the memory block.
// You could do that either as raw data, or use the XML or ValueTree classes
// as intermediaries to make it easy to save and load complex data.
// Create an outer XML element..
XmlElement xml{"MYPLUGINSETTINGS"};
sharedParams->setAttributesOnXml(xml);
// list<StateChangeSubscriber*>::iterator p;
// for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
// (*p)->getStateInformation(xml);
// }
// Store the values of all our parameters, using their param ID as the XML attribute
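// note: getValue() returns the normalised 0..1 value, so that is what gets written to the XML; setStateInformation() restores it via setValue() in the same range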
for (auto* param : getParameters())
if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
xml.setAttribute (p->paramID, p->getValue());
// then use this helper function to stuff it into the binary blob and return it..
copyXmlToBinary (xml, destData);
}
void JuicySFAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
// You should use this method to restore your parameters from this memory block,
// whose contents will have been created by the getStateInformation() call.
// This getXmlFromBinary() helper function retrieves our XML from the binary blob..
shared_ptr<XmlElement> xmlState{getXmlFromBinary(data, sizeInBytes)};
if (xmlState != nullptr)
{
// make sure that it's actually our type of XML object..
if (xmlState->hasTagName ("MYPLUGINSETTINGS"))
{
// list<StateChangeSubscriber*>::iterator p;
// for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
// (*p)->setStateInformation(xmlState);
// }
// ok, now pull out our last window size..
sharedParams->loadAttributesFromXml(xmlState);
// Now reload our parameters..
for (auto* param : getParameters())
if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
p->setValue ((float) xmlState->getDoubleAttribute (p->paramID, p->getValue()));
fluidSynthModel.onFileNameChanged(
sharedParams->getSoundFontPath(),
sharedParams->getBank(),
sharedParams->getPreset());
AudioProcessorEditor* editor{getActiveEditor()};
if (editor != nullptr) {
editor->setSize(
sharedParams->getUiWidth(),
sharedParams->getUiHeight());
jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
ExposesComponents* exposesComponents = dynamic_cast<ExposesComponents*> (editor);
exposesComponents->getFilePicker().setDisplayedFilePath(sharedParams->getSoundFontPath());
}
// const String& currentSoundFontAbsPath = fluidSynthModel->getCurrentSoundFontAbsPath();
// if (currentSoundFontAbsPath.isNotEmpty()) {
// fileChooser.setCurrentFile(File(currentSoundFontAbsPath), true, dontSendNotification);
// }
}
}
}
//void JuicySFAudioProcessor::subscribeToStateChanges(StateChangeSubscriber* subscriber) {
// stateChangeSubscribers.push_back(subscriber);
//}
//
//void JuicySFAudioProcessor::unsubscribeFromStateChanges(StateChangeSubscriber* subscriber) {
// stateChangeSubscribers.remove(subscriber);
//}
// FluidSynth only supports float in its process function, so that's all we can support.
bool JuicySFAudioProcessor::supportsDoublePrecisionProcessing() const {
return false;
}
FluidSynthModel* JuicySFAudioProcessor::getFluidSynthModel() {
return &fluidSynthModel;
}
//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
return new JuicySFAudioProcessor();
}