Tracktion::graph processing in AudioProcessorGraph

I was exploring how AudioProcessor and tracktion::graph could work together, and I created this class to wrap a node of my AudioProcessorGraph:

// Copies a juce::MidiBuffer into a tracktion MidiMessageArray, preserving each event's sample position.
static tracktion_engine::MidiMessageArray toMidiArray(juce::MidiBuffer& midiBuffer)
{
    tracktion_engine::MidiMessageArray tracktionMidiArray;
    for (const auto midiEvent : midiBuffer)
    {
        const auto& message = midiEvent.getMessage();
        const auto samplePosition = midiEvent.samplePosition;
        
        tracktionMidiArray.addMidiMessage(message, samplePosition, 0);
    }
    
    return tracktionMidiArray;
}

// Converts a tracktion MidiMessageArray back into a juce::MidiBuffer, using each message's timestamp as its sample position.
static MidiBuffer toMidiBuffer(tracktion_engine::MidiMessageArray& tracktionMidiArray)
{
    MidiBuffer midiBuffer;
    for (const auto& midiEvent : tracktionMidiArray)
        midiBuffer.addEvent(midiEvent, (int)midiEvent.getTimeStamp());
    return midiBuffer;
}


class ProcessorNode : public ayra::ConnectedNode
{
public:
    ProcessorNode(const juce::AudioProcessorGraph::Node::Ptr n,
                  const bool latencyCompensation) : ayra::ConnectedNode(latencyCompensation), node(n) {}
    
    tracktion::graph::NodeProperties getNodeProperties() override
    {
        auto const& processor = node->getProcessor();
        tracktion::graph::NodeProperties props;
        // Use the max so that synth-style processors (no inputs, some outputs) still count as having audio.
        props.hasAudio = jmax(processor->getTotalNumInputChannels(), processor->getTotalNumOutputChannels()) > 0;
        props.hasMidi = processor->acceptsMidi() || processor->producesMidi();
        props.numberOfChannels = jmax(processor->getTotalNumInputChannels(), processor->getTotalNumOutputChannels());
        props.latencyNumSamples = processor->getLatencySamples();
        props.nodeID = (size_t)node->nodeID.uid;
        
        return props;
    }

    void prepareToPlay (const tracktion::graph::PlaybackInitialisationInfo& info) override
    {
        ayra::ConnectedNode::prepareToPlay(info);
        auto const& processor = node->getProcessor();
        processor->prepareToPlay(info.sampleRate, info.blockSize);
    }
    
    void process (tracktion::graph::Node::ProcessContext& pc) override
    {
        ayra::ConnectedNode::process(pc);
        auto const& processor = node->getProcessor();
        audioBuffer = tracktion::toAudioBuffer(pc.buffers.audio);
        midiBuffer  = toMidiBuffer(pc.buffers.midi);
        processor->processBlock(audioBuffer, midiBuffer);
        pc.buffers.audio = tracktion::toBufferView(audioBuffer);
        pc.buffers.midi  = toMidiArray(midiBuffer);
    }
    
    void addAudioConnection(std::shared_ptr<ProcessorNode> fromNode, const int fromCh, const int toCh)
    {
        tracktion::graph::ChannelConnection channelConnection { fromCh, toCh };
        ayra::ConnectedNode::addAudioConnection(fromNode, channelConnection);
    }
    
    void addMidiConnection(std::shared_ptr<ProcessorNode> fromNode)
    {
        ayra::ConnectedNode::addMidiConnection(fromNode);
    }
    
    juce::AudioProcessorGraph::Node::Ptr node;
    
private:

    juce::MidiBuffer midiBuffer;
    juce::AudioBuffer<float> audioBuffer;
};

What do you think about it? (ayra::ConnectedNode is simply a ConnectedNode, but with a bool to choose whether or not to createLatencyNodes.)

inline bool ConnectedNode::createLatencyNodes()
{
    if (!latencyCompensation) { return false; }

//[... and so on]

Could it be a good starting point?

In my graph I have std::vector<std::shared_ptr<ProcessorNode>> processorNodes;

and these functions:

void initializeProcessorNodes(const juce::Array<AudioProcessorGraph::Node*> nodes, const bool compensateLatency)
{
    for (const auto& node : nodes)
        processorNodes.push_back(std::make_shared<ProcessorNode>(node, compensateLatency));
}

std::shared_ptr<ProcessorNode> getProcessorNode(const AudioProcessorGraph::NodeID forNodeID)
{
    for (const auto& processorNode : processorNodes)
        if (processorNode->node->nodeID == forNodeID) { return processorNode; }

    return nullptr; // no ProcessorNode has been created for this NodeID
}

template <typename RenderSequence>
void createRenderingOpsForProcessorNode(const AudioProcessorGraph::Node::Ptr node,
                                        const Connections& c, RenderSequence& sequence)
{
    const auto& processor = *node->getProcessor();
    const int numInputChannels = processor.getTotalNumInputChannels();
    const auto& connections = c.getConnections();
    
    if (processor.acceptsMidi())
        for (const auto& conn : connections)
            if (conn.destination.nodeID == node->nodeID)
                if (conn.source.isMIDI())
                    sequence.getProcessorNode(node->nodeID)->addMidiConnection(sequence.getProcessorNode(conn.source.nodeID));
    
    for (int ch = 0; ch < numInputChannels; ch++)
        for (const auto& conn : connections)
            if (conn.destination.nodeID == node->nodeID)
                if (conn.destination.channelIndex == ch)
                    sequence.getProcessorNode(node->nodeID)->addAudioConnection(sequence.getProcessorNode(conn.source.nodeID),
                                                                                conn.source.channelIndex, ch);
}
I call them like this:

        sequence.initializeProcessorNodes(orderedNodes, compensateLatency);

        const Array<Node*> orderedNodesReversed = getOrderedNodesReversed();

        for (auto& node : orderedNodesReversed)
            createRenderingOpsForProcessorNode(node, c, sequence);

It’s a bit difficult to tell from all that code, but I think so?

Does it work?


Actually I haven’t tested it yet, because I haven’t studied the player part of tracktion_graph, e.g. do I need to create a root node first? And if so, which node is the root in my graph? Then do I simply have to create a player and start it?

The root is just the root Node of the graph, i.e. the one that all the other nodes feed into.

Then yes, you can use one of the existing players, give it a Node to play and then process it from your audio device/render code etc.
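
For reference, a rough, untested sketch of what that could look like with the single-threaded tracktion::graph::NodePlayer, reusing the toMidiArray/toMidiBuffer helpers from the start of the thread. GraphPlayer is just a made-up wrapper name, and the exact ProcessContext layout differs between tracktion_graph versions, so check the signatures against the version you’re building with:

// A rough, untested sketch: owns a NodePlayer and drives it from a normal JUCE audio callback.
struct GraphPlayer
{
    explicit GraphPlayer (std::unique_ptr<tracktion::graph::Node> rootNode)
        : player (std::move (rootNode)) {}

    void prepare (double sampleRate, int blockSize)
    {
        player.prepareToPlay (sampleRate, blockSize);
    }

    void process (juce::AudioBuffer<float>& buffer, juce::MidiBuffer& midi)
    {
        auto audioView = tracktion::toBufferView (buffer);   // same helper used in ProcessorNode::process above
        auto midiArray = toMidiArray (midi);

        const juce::Range<int64_t> referenceRange (referencePosition,
                                                   referencePosition + buffer.getNumSamples());
        referencePosition += buffer.getNumSamples();

        // NB: the ProcessContext layout (numSamples / referenceSampleRange / buffers) has changed
        // between tracktion_graph versions, so check it against the version you are building with.
        tracktion::graph::Node::ProcessContext pc { (choc::buffer::FrameCount) buffer.getNumSamples(),
                                                    referenceRange,
                                                    { audioView, midiArray } };
        player.process (pc);

        midi = toMidiBuffer (midiArray);
    }

    tracktion::graph::NodePlayer player;
    int64_t referencePosition = 0;
};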


Ok, thank you, but what if I have 2 or 3 output nodes (i.e. nodes connected to different devices: an audio node connected to an audio output, a MIDI node connected to a MIDI output port, and so on)?

That’s fine, but you need each of those to be inputs to a single root Node.
The different output nodes should fill each of the output device buffers in their process block. This is how we do it in Tracktion Engine for multiple WaveOutputDevices etc.
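
To make that concrete, here’s a rough, untested sketch of what one of those output nodes could look like, reusing the ayra::ConnectedNode base and the tracktion::toAudioBuffer helper from ProcessorNode above. DeviceSinkNode and deviceBuffer are made-up names for whatever buffer your output device exposes, and depending on your ConnectedNode copy you may also need to override getNodeProperties like ProcessorNode does:

// A hypothetical sink node: pulls its connected inputs like ProcessorNode, then copies the
// result into a buffer owned by the output device, while still passing the audio on so the
// node can feed the single root Node.
class DeviceSinkNode : public ayra::ConnectedNode
{
public:
    DeviceSinkNode (juce::AudioBuffer<float>& bufferToFill, const bool latencyCompensation)
        : ayra::ConnectedNode (latencyCompensation), deviceBuffer (bufferToFill) {}

    void process (tracktion::graph::Node::ProcessContext& pc) override
    {
        ayra::ConnectedNode::process (pc);   // sums whatever is connected to this node

        // Copy the summed audio into the device's buffer.
        const auto processed = tracktion::toAudioBuffer (pc.buffers.audio);
        const int numChannels = juce::jmin (deviceBuffer.getNumChannels(), processed.getNumChannels());
        const int numSamples  = juce::jmin (deviceBuffer.getNumSamples(),  processed.getNumSamples());

        for (int ch = 0; ch < numChannels; ++ch)
            deviceBuffer.copyFrom (ch, 0, processed, ch, 0, numSamples);
    }

private:
    juce::AudioBuffer<float>& deviceBuffer;
};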


Ok, thank you, so the root node could just be a do-nothing node placed at the end of the chain, is that correct?

Yeah, I think in TE we use a SummingNode just because it has the right interface but we discard the final output of the node.
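
Something along these lines, as an untested sketch: audioSinkNode and midiSinkNode stand in for the device output nodes discussed above, and it assumes SummingNode::addInput takes ownership of a Node (worth checking against the header of the version you’re using). The summed output of the root itself is never used:

// Hypothetical helper: builds a root Node whose only job is to pull all the device output
// nodes; its own summed output is discarded.
std::unique_ptr<tracktion::graph::Node> makeRootNode (std::unique_ptr<tracktion::graph::Node> audioSinkNode,
                                                      std::unique_ptr<tracktion::graph::Node> midiSinkNode)
{
    auto root = std::make_unique<tracktion::graph::SummingNode>();
    root->addInput (std::move (audioSinkNode));
    root->addInput (std::move (midiSinkNode));
    return root;
}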


Yeah, I’ll report back when I’ve tried it :muscle: