juicysfplugin/Source/PluginProcessor.cpp

/*
==============================================================================
This file was auto-generated!
It contains the basic framework code for a JUCE plugin processor.
==============================================================================
*/
#include "PluginProcessor.h"
#include "PluginEditor.h"
#include "SoundfontSynthVoice.h"
#include "SoundfontSynthSound.h"
#include "ExposesComponents.h"
#include "MidiConstants.h"
#include "Util.h"
#include "SharesParams.h"
#include "Params.h"
#include "GuiConstants.h"
using namespace std;
using Parameter = AudioProcessorValueTreeState::Parameter;
AudioProcessor* JUCE_CALLTYPE createPluginFilter();
//==============================================================================
//, sharedParams{static_pointer_cast<SharesParams>(make_shared<Params>())}
JuicySFAudioProcessor::JuicySFAudioProcessor()
: AudioProcessor{getBusesProperties()}
// , sharedParams{}
, valueTreeState{
*this,
nullptr,
"MYPLUGINSETTINGS",
createParameterLayout()}
, fluidSynthModel{valueTreeState}
//, fluidSynthModel{*this}
//, pluginEditor(nullptr)
{
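// Non-parameter state (window size, soundfont path, preset/bank lists) is kept in child
// ValueTrees so the editor can listen to it and so it is serialised together with the
// parameters in get/setStateInformation().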
valueTreeState.state.appendChild({ "uiState", {
{ "width", GuiConstants::minWidth },
{ "height", GuiConstants::minHeight }
}, {} }, nullptr);
valueTreeState.state.appendChild({ "soundFont", {
{ "path", "" },
}, {} }, nullptr);
// no properties, no subtrees (yet)
valueTreeState.state.appendChild({ "presets", {}, {} }, nullptr);
// no properties, no subtrees (yet)
valueTreeState.state.appendChild({ "banks", {}, {} }, nullptr);
// valueTreeState.state.setProperty("soundFontPath", "", nullptr);
// valueTreeState.state.appendChild({ "soundFontPath", {} }, nullptr);
initialiseSynth();
}
AudioProcessorValueTreeState::ParameterLayout JuicySFAudioProcessor::createParameterLayout() {
// std::vector<std::unique_ptr<AudioParameterInt>> params;
// for (int i = 1; i < 9; ++i)
// params.push_back (std::make_unique<AudioParameterInt> (String (i), String (i), 0, i, 0));
// make_unique<AudioParameter>("soundfontPath", "filepath to soundfont", 0, 127, 0, "A" ),
// https://stackoverflow.com/a/8469002/5257399
unique_ptr<AudioParameterInt> params[] {
// make_unique<AudioParameterInt>("uiWidthPersist", "width of this plugin's GUI. Editor listens for changes (e.g. on load)", GuiConstants::minWidth, GuiConstants::maxWidth, GuiConstants::minWidth, "UI Width Persist" ),
// make_unique<AudioParameterInt>("uiHeightPersist", "height of this plugin's GUI. Editor listens for changes (e.g. on load)", GuiConstants::minHeight, GuiConstants::maxHeight, GuiConstants::minHeight, "UI Height Persist" ),
// make_unique<AudioParameterInt>("uiWidthTemp", "width of this plugin's GUI. Editor writes here on change (e.g. on window resize). Processor copies this into Persist before any save.", GuiConstants::minWidth, GuiConstants::maxWidth, GuiConstants::minWidth, "UI Width Temp" ),
// make_unique<AudioParameterInt>("uiHeightTemp", "height of this plugin's GUI. Editor writes here on change (e.g. on window resize). Processor copies this into Persist before any save.", GuiConstants::minHeight, GuiConstants::maxHeight, GuiConstants::minHeight, "UI Height Temp" ),
// make_unique<AudioParameterInt>("uiWidth", "width of this plugin's GUI", GuiConstants::minWidth, GuiConstants::maxWidth, GuiConstants::minWidth, "UI Width" ),
// make_unique<AudioParameterInt>("uiHeight", "height of this plugin's GUI", GuiConstants::minHeight, GuiConstants::maxHeight, GuiConstants::minHeight, "UI Height" ),
// todo: check whether bank really is 0-127
make_unique<AudioParameterInt>("bank", "which bank is selected in the soundfont", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "Bank" ),
// note: banks may be sparse, and lack a 0th preset. so defend against this.
make_unique<AudioParameterInt>("preset", "which patch (aka patch, program, instrument) is selected in the soundfont", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "Preset" ),
make_unique<AudioParameterInt>("attack", "volume envelope attack time", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "A" ),
make_unique<AudioParameterInt>("decay", "volume envelope sustain attentuation", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "D" ),
make_unique<AudioParameterInt>("sustain", "volume envelope decay time", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "S" ),
make_unique<AudioParameterInt>("release", "volume envelope release time", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "R" ),
make_unique<AudioParameterInt>("filterCutOff", "low-pass filter cut-off frequency", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "Cut" ),
make_unique<AudioParameterInt>("filterResonance", "low-pass filter resonance attentuation", MidiConstants::midiMinValue, MidiConstants::midiMaxValue, MidiConstants::midiMinValue, "Res" ),
};
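// The ParameterLayout takes ownership of the parameters; the move iterators hand over the unique_ptrs.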
return {
make_move_iterator(begin(params)),
make_move_iterator(end(params))
};
}
JuicySFAudioProcessor::~JuicySFAudioProcessor()
{
// delete fluidSynthModel;
}
void JuicySFAudioProcessor::initialiseSynth() {
fluidSynthModel.initialise();
// fluidSynth = fluidSynthModel.getSynth();
const int numVoices = 8;
// Add some voices...
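// Each voice is constructed around the same shared fluid_synth_t owned by fluidSynthModel;
// per-voice note handling lives in SoundfontSynthVoice (see the comment in processBlock).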
for (int i = numVoices; --i >= 0;)
synth.addVoice(new SoundfontSynthVoice(fluidSynthModel.getSynth()));
// ..and give the synth a sound to play
synth.addSound(new SoundfontSynthSound());
}
//==============================================================================
const String JuicySFAudioProcessor::getName() const
{
return JucePlugin_Name;
}
bool JuicySFAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
return true;
#else
return false;
#endif
}
bool JuicySFAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
return true;
#else
return false;
#endif
}
double JuicySFAudioProcessor::getTailLengthSeconds() const
{
return 0.0;
}
int JuicySFAudioProcessor::getNumPrograms()
{
return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
// so this should be at least 1, even if you're not really implementing programs.
}
int JuicySFAudioProcessor::getCurrentProgram()
{
return 0;
}
void JuicySFAudioProcessor::setCurrentProgram (int index)
{
}
const String JuicySFAudioProcessor::getProgramName (int index)
{
return {};
}
void JuicySFAudioProcessor::changeProgramName (int index, const String& newName)
{
}
//==============================================================================
void JuicySFAudioProcessor::prepareToPlay (double sampleRate, int /*samplesPerBlock*/)
{
// Use this method as the place to do any pre-playback
// initialisation that you need..
synth.setCurrentPlaybackSampleRate (sampleRate);
keyboardState.reset();
fluidSynthModel.setSampleRate(static_cast<float>(sampleRate));
reset();
}
void JuicySFAudioProcessor::releaseResources()
{
// When playback stops, you can use this as an opportunity to free up any
// spare memory, etc.
keyboardState.reset();
}
bool JuicySFAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
// Only mono/stereo layouts are supported, and any enabled input must match the output layout
const AudioChannelSet& mainOutput = layouts.getMainOutputChannelSet();
const AudioChannelSet& mainInput = layouts.getMainInputChannelSet();
// input and output layout must either be the same or the input must be disabled altogether
if (! mainInput.isDisabled() && mainInput != mainOutput)
return false;
// do not allow disabling the main buses
if (mainOutput.isDisabled())
return false;
// only allow stereo and mono
return mainOutput.size() <= 2;
}
AudioProcessor::BusesProperties JuicySFAudioProcessor::getBusesProperties() {
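// This is a synth: it declares a single stereo output bus and no input bus.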
return BusesProperties()
.withOutput ("Output", AudioChannelSet::stereo(), true);
}
void JuicySFAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) {
jassert (!isUsingDoublePrecision());
const int numSamples = buffer.getNumSamples();
// Now pass any incoming midi messages to our keyboard state object, and let it
// add messages to the buffer if the user is clicking on the on-screen keys
keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
MidiBuffer processedMidi;
int time;
MidiMessage m;
// TODO: factor into a MidiCollector
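// Translate each incoming JUCE MidiMessage into a fluid_midi_event_t and dispatch it via
// fluid_synth_handle_midi_event(); each event is allocated per message and freed straight after.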
for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);) {
DEBUG_PRINT ( m.getDescription() );
// explicitly not handling note_on/off, or pitch_bend, because these are (for better or worse)
// responsibilities of SoundfontSynthVoice.
// well, by that logic maybe I should move program change onto Voice. but it doesn't feel like a per-voice concern.
if (m.isController()) {
// shared_ptr<fluid_midi_event_t> midi_event{
// new_fluid_midi_event(),
// [](fluid_midi_event_t *event) {
// delete_fluid_midi_event(midi_event);
// }};
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(CONTROL_CHANGE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_control(midi_event, m.getControllerNumber());
fluid_midi_event_set_value(midi_event, m.getControllerValue());
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
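// Mirror recognised CC values back into the matching APVTS parameters so the GUI sliders
// (and host automation) stay in sync with hardware controllers.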
switch(static_cast<fluid_midi_control_change>(m.getControllerNumber())) {
case SOUND_CTRL2: { // MIDI CC 71 Timbre/Harmonic Intensity (filter resonance)
// valueTreeState.state.setProperty({"filterResonance"}, m.getControllerValue(), nullptr);
RangedAudioParameter *param {valueTreeState.getParameter("filterResonance")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL3: { // MIDI CC 72 Release time
RangedAudioParameter *param {valueTreeState.getParameter("release")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL4: { // MIDI CC 73 Attack time
RangedAudioParameter *param {valueTreeState.getParameter("release")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL5: { // MIDI CC 74 Brightness (cutoff frequency, FILTERFC)
RangedAudioParameter *param {valueTreeState.getParameter("filterCutOff")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL6: { // MIDI CC 75 Decay Time
RangedAudioParameter *param {valueTreeState.getParameter("decay")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
case SOUND_CTRL10: { // MIDI CC 79 Sound Controller 10 (undefined by the MIDI spec; used here for sustain)
RangedAudioParameter *param {valueTreeState.getParameter("sustain")};
jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
*castParam = m.getControllerValue();
break;
}
default: {
break;
}
}
// sharedParams->acceptMidiControlEvent(m.getControllerNumber(), m.getControllerValue());
// AudioProcessorEditor* editor{getActiveEditor()};
// jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
// ExposesComponents* exposesComponents{dynamic_cast<ExposesComponents*>(editor)};
// exposesComponents->getSliders().acceptMidiControlEvent(m.getControllerNumber(), m.getControllerValue());
} else if (m.isProgramChange()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(PROGRAM_CHANGE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_program(midi_event, m.getProgramChangeNumber());
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isPitchWheel()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(PITCH_BEND));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_pitch(midi_event, m.getPitchWheelValue());
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isChannelPressure()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(CHANNEL_PRESSURE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
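// NB: there is no dedicated channel-pressure setter in this fluidsynth C API; set_program()
// is presumably used because it writes the data byte the CHANNEL_PRESSURE handler reads.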
fluid_midi_event_set_program(midi_event, m.getChannelPressureValue());
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
} else if (m.isAftertouch()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(KEY_PRESSURE));
fluid_midi_event_set_channel(midi_event, fluidSynthModel.getChannel());
fluid_midi_event_set_key(midi_event, m.getNoteNumber());
fluid_midi_event_set_value(midi_event, m.getAfterTouchValue());
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
// } else if (m.isMetaEvent()) {
// fluid_midi_event_t *midi_event(new_fluid_midi_event());
// fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSTEM_RESET));
// fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
// delete_fluid_midi_event(midi_event);
} else if (m.isSysEx()) {
fluid_midi_event_t *midi_event(new_fluid_midi_event());
fluid_midi_event_set_type(midi_event, static_cast<int>(MIDI_SYSEX));
// I assume that the MidiMessage's sysex buffer would be freed anyway when MidiMessage is destroyed, so set dynamic=false
// to ensure that fluidsynth does not attempt to free the sysex buffer during delete_fluid_midi_event()
fluid_midi_event_set_sysex(midi_event, const_cast<juce::uint8*>(m.getSysExData()), m.getSysExDataSize(), static_cast<int>(false));
fluid_synth_handle_midi_event(fluidSynthModel.getSynth().get(), midi_event);
delete_fluid_midi_event(midi_event);
}
}
// int pval;
// 73: 64 attack
// 75: decay
// 79: sustain
// 72: 64 release
// fluid_synth_get_cc(fluidSynth, 0, 73, &pval);
// Logger::outputDebugString ( juce::String::formatted("hey: %d\n", pval) );
// and now get our synth to process these midi events and generate its output.
synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
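// ...then let fluidsynth render the same block directly into the plugin's output buffer
// (float only; see supportsDoublePrecisionProcessing() below).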
fluid_synth_process(fluidSynthModel.getSynth().get(), numSamples, 0, nullptr, buffer.getNumChannels(), buffer.getArrayOfWritePointers());
// (see juce_VST3_Wrapper.cpp for the assertion this would trip otherwise)
// we are !JucePlugin_ProducesMidiOutput, so clear remaining MIDI messages from our buffer
midiMessages.clear();
// In case we have more outputs than inputs, this code clears any output
// channels that didn't contain input data, (because these aren't
// guaranteed to be empty - they may contain garbage).
// This is here to avoid people getting screaming feedback
// when they first compile a plugin, but obviously you don't need to keep
// this code if your algorithm always overwrites all the output channels.
// for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
// buffer.clear (i, 0, numSamples);
}
//==============================================================================
bool JuicySFAudioProcessor::hasEditor() const
{
return true; // (change this to false if you choose to not supply an editor)
}
AudioProcessorEditor* JuicySFAudioProcessor::createEditor()
{
// grab a raw pointer to it for our own use
return /*pluginEditor = */new JuicySFAudioProcessorEditor (*this, valueTreeState);
}
//==============================================================================
void JuicySFAudioProcessor::getStateInformation (MemoryBlock& destData)
{
// You should use this method to store your parameters in the memory block.
// You could do that either as raw data, or use the XML or ValueTree classes
// as intermediaries to make it easy to save and load complex data.
// Create an outer XML element..
// XmlElement xml{"MYPLUGINSETTINGS"};
// sharedParams->setAttributesOnXml(xml);
auto state{valueTreeState.copyState()};
shared_ptr<XmlElement> xml{state.createXml()};
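// copyState() snapshots the whole APVTS tree (the parameters plus the uiState/soundFont
// children appended in the constructor), so everything round-trips through one XML blob.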
// sharedParams.setAttributesOnXml(xml);
// list<StateChangeSubscriber*>::iterator p;
// for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
// (*p)->getStateInformation(xml);
// }
// Store the values of all our parameters, using their param ID as the XML attribute
// for (auto* param : getParameters())
// if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
// xml->setAttribute (p->paramID, p->getValue());
// then use this helper function to stuff it into the binary blob and return it..
if (xml.get() != nullptr) {
copyXmlToBinary(*xml, destData);
}
}
void JuicySFAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
// You should use this method to restore your parameters from this memory block,
// whose contents will have been created by the getStateInformation() call.
// This getXmlFromBinary() helper function retrieves our XML from the binary blob..
shared_ptr<XmlElement> xmlState{getXmlFromBinary(data, sizeInBytes)};
// unique_ptr<XmlElement> xmlState{getXmlFromBinary(data, sizeInBytes)};
if (xmlState != nullptr)
DEBUG_PRINT(xmlState->createDocument("",false,false));
/*
<MYPLUGINSETTINGS soundFontPath="">
<PARAM id="attack" value="0.0"/>
<PARAM id="bank" value="0.0"/>
<PARAM id="decay" value="0.0"/>
<PARAM id="filterCutOff" value="0.0"/>
<PARAM id="filterResonance" value="0.0"/>
<PARAM id="preset" value="0.0"/>
<PARAM id="release" value="0.0"/>
<PARAM id="sustain" value="0.0"/>
<uiState width="722" height="300"/>
</MYPLUGINSETTINGS>
*/
if (xmlState.get() != nullptr) {
// make sure that it's actually our type of XML object..
// if (xmlState->hasTagName ("MYPLUGINSETTINGS")) {
if (xmlState->hasTagName(valueTreeState.state.getType())) {
// valueTreeState.replaceState(ValueTree::fromXml(*xmlState));
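// replaceState() is left commented out, presumably to keep the existing child trees (and any
// listeners attached to them) alive; instead, parameters and the soundFont/uiState children
// are restored individually below.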
// Parameter values live in <PARAM id=... value=.../> child elements (see the sample above), not root attributes
for (auto* param : getParameters())
if (auto* p = dynamic_cast<AudioProcessorParameterWithID*>(param))
if (XmlElement* paramXml = xmlState->getChildByAttribute("id", p->paramID))
p->setValueNotifyingHost(p->getValueForText(paramXml->getStringAttribute("value")));
{
// Value value{valueTreeState.state.getPropertyAsValue("soundFontPath", nullptr)};
// value = xmlState->getStringAttribute("soundFontPath", value.getValue());
ValueTree tree{valueTreeState.state.getChildWithName("soundFont")};
XmlElement* xmlElement{xmlState->getChildByName("soundFont")};
if (xmlElement) {
Value value{tree.getPropertyAsValue("path", nullptr)};
value = xmlElement->getStringAttribute("path", value.getValue());
}
// valueTreeState.getParameter("soundFontPath")->getValue()
// valueTreeState.getParameter("soundFontPath")->getValue();
// RangedAudioParameter *param {valueTreeState.getParameter("release")};
// jassert(dynamic_cast<AudioParameterInt*> (param) != nullptr);
// AudioParameterInt* castParam {dynamic_cast<AudioParameterInt*> (param)};
// *castParam = m.getControllerValue();
}
{
ValueTree tree{valueTreeState.state.getChildWithName("uiState")};
XmlElement* xmlElement{xmlState->getChildByName("uiState")};
if (xmlElement) {
{
Value value{tree.getPropertyAsValue("width", nullptr)};
value = xmlElement->getIntAttribute("width", value.getValue());
}
{
Value value{tree.getPropertyAsValue("height", nullptr)};
value = xmlElement->getIntAttribute("height", value.getValue());
}
}
// tree.getPropertyAsValue("width", nullptr)
// tree.
// valueTreeState.replaceState(ValueTree::fromXml(*xmlState))
// value = xmlState->getStringAttribute("soundFontPath", value.getValue());
}
// list<StateChangeSubscriber*>::iterator p;
// for(p = stateChangeSubscribers.begin(); p != stateChangeSubscribers.end(); p++) {
// (*p)->setStateInformation(xmlState);
// }
// ok, now pull out our last window size..
// sharedParams.loadAttributesFromXml(xmlState);
// Now reload our parameters..
// for (auto* param : getParameters())
// if (auto* p = dynamic_cast<AudioProcessorParameterWithID*> (param))
// p->setValue ((float) xmlState->getDoubleAttribute (p->paramID, p->getValue()));
//
// fluidSynthModel.onFileNameChanged(
// sharedParams->getSoundFontPath(),
// sharedParams->getBank(),
// sharedParams->getPreset());
//
// AudioProcessorEditor* editor{getActiveEditor()};
// if (editor != nullptr) {
// editor->setSize(
// sharedParams->getUiWidth(),
// sharedParams->getUiHeight());
//
// jassert(dynamic_cast<ExposesComponents*> (editor) != nullptr);
// ExposesComponents* exposesComponents = dynamic_cast<ExposesComponents*> (editor);
// exposesComponents->getFilePicker().setDisplayedFilePath(sharedParams->getSoundFontPath());
// }
// const String& currentSoundFontAbsPath = fluidSynthModel->getCurrentSoundFontAbsPath();
// if (currentSoundFontAbsPath.isNotEmpty()) {
// fileChooser.setCurrentFile(File(currentSoundFontAbsPath), true, dontSendNotification);
// }
}
}
}
//void JuicySFAudioProcessor::subscribeToStateChanges(StateChangeSubscriber* subscriber) {
// stateChangeSubscribers.push_back(subscriber);
//}
//
//void JuicySFAudioProcessor::unsubscribeFromStateChanges(StateChangeSubscriber* subscriber) {
// stateChangeSubscribers.remove(subscriber);
//}
// FluidSynth only supports float in its process function, so that's all we can support.
bool JuicySFAudioProcessor::supportsDoublePrecisionProcessing() const {
return false;
}
FluidSynthModel& JuicySFAudioProcessor::getFluidSynthModel() {
return fluidSynthModel;
}
//SharesParams& JuicySFAudioProcessor::getSharedParams() {
// return sharedParams;
//}
//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
return new JuicySFAudioProcessor();
}