{
"app-id": "org.jacktrip.JackTrip",
"runtime": "org.kde.Platform",
- "runtime-version": "6.4",
+ "runtime-version": "6.6",
"sdk": "org.kde.Sdk",
"base": "io.qt.qtwebengine.BaseApp",
- "base-version": "6.4",
+ "base-version": "6.6",
"command": "jacktrip",
"finish-args": [
"--share=ipc",
+- Version: "2.2.0"
+ Date: 2024-01-22
+ Description:
+ - (updated) Improved support for different input and output devices
+ - (updated) Various latency improvements for packet loss concealment
+ - (updated) VS Mode: made it easier to dismiss the user feedback dialog
+ - (updated) VS Mode: error message for disconnected audio interfaces
+ - (fixed) VS Mode: broken deep links when a studio doesn't match filters
+ - (fixed) VS Mode: refused to connect to studios not running at 48 kHz
+ - (fixed) VS Mode: showed the wrong devices selected when connected
- Version: "2.1.0"
Date: 2023-11-06
Description:
-option('wair', type : 'boolean', value : 'false', description: 'WAIR')
+option('wair', type : 'boolean', value : false, description: 'WAIR')
option('rtaudio', type : 'feature', value : 'auto', description: 'Build with RtAudio Backend')
option('jack', type : 'feature', value : 'auto', description: 'Build with JACK Backend')
-option('weakjack', type : 'boolean', value : 'false', description: 'Weak link JACK library')
-option('nogui', type : 'boolean', value : 'false', description: 'Build without graphical user interface')
-option('novs', type : 'boolean', value : 'false', description: 'Build without Virtual Studio support')
-option('vsftux', type : 'boolean', value : 'false', description: 'Build with Virtual Studio first launch experience')
-option('noupdater', type : 'boolean', value : 'false', description: 'Build without auto-update support')
-option('nofeedback', type : 'boolean', value : 'false', description: 'Build without feedback detection')
+option('weakjack', type : 'boolean', value : false, description: 'Weak link JACK library')
+option('nogui', type : 'boolean', value : false, description: 'Build without graphical user interface')
+option('novs', type : 'boolean', value : false, description: 'Build without Virtual Studio support')
+option('vsftux', type : 'boolean', value : false, description: 'Build with Virtual Studio first launch experience')
+option('noupdater', type : 'boolean', value : false, description: 'Build without auto-update support')
+option('nofeedback', type : 'boolean', value : false, description: 'Build without feedback detection')
option('profile', type: 'combo', choices: ['default', 'development'], value: 'default', description: 'Choose build profile / Sets desktop id accordingly')
option('qtversion', type : 'combo', choices: ['', '5', '6'], description: 'Choose to build with either Qt5 or Qt6')
option('buildinfo', type : 'string', value : '', yield : true, description: 'Additional info used to describe the build')
\ No newline at end of file
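For reference, these options are passed to Meson with the standard -D flag at configure time. A hypothetical invocation (directory name illustrative) that disables the GUI and Virtual Studio support while forcing the RtAudio backend and Qt 6:

    meson setup builddir -Dnogui=true -Dnovs=true -Drtaudio=enabled -Dqtversion=6

With this change, boolean options take unquoted true/false, feature options take enabled/disabled/auto, and combo options take one of their declared choices.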
{
"app_name": "JackTrip",
"releases": [
+ {
+ "version": "2.1.0",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+ "download": {
+ "date": "2023-11-06T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-macOS-x64-signed-installer.pkg",
+ "downloadSize": "177373751",
+ "sha256": "e7ffb56b99f25de7c71774e6e6484c1e400ebe2fa05b9618695030e83a5de9a2"
+ }
+ },
+ {
+ "version": "2.1.0-beta1",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0-beta1",
+ "download": {
+ "date": "2023-10-31T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-beta1-macOS-x64-signed-installer.pkg",
+ "downloadSize": "177365464",
+ "sha256": "46ab83844671028a27b59b09a4e7eec1398b772f85820c1213f330d35c2ceba9"
+ }
+ },
{
"version": "2.0.2",
"changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
{
"app_name": "JackTrip",
"releases": [
+ {
+ "version": "2.1.0",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+ "download": {
+ "date": "2023-11-06T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Windows-x64-signed-installer.msi",
+ "downloadSize": "108511232",
+ "sha256": "cd5b4735421a484bf83635f07653755e56b095c785f021eedaa4ca2d4132dd7f"
+ }
+ },
+ {
+ "version": "2.1.0-beta1",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0-beta1",
+ "download": {
+ "date": "2023-10-31T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-beta1-Windows-x64-signed-installer.msi",
+ "downloadSize": "108511232",
+ "sha256": "d52784050fdd9876d44bc3fddd1b70b93065ab4c6b67076dcbfe42a098d73447"
+ }
+ },
{
"version": "2.0.2",
"changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
{
"app_name": "JackTrip",
"releases": [
+ {
+ "version": "2.1.0",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+ "download": {
+ "date": "2023-11-06T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Linux-x64-binary.zip",
+ "downloadSize": "1239221",
+ "sha256": "1f990a9d4e7874d5129f287eee3ace4881130c23531be9ca816a9cc01df17379"
+ }
+ },
{
"version": "2.0.2",
"changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
{
"app_name": "JackTrip",
"releases": [
+ {
+ "version": "2.1.0",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+ "download": {
+ "date": "2023-11-06T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-macOS-x64-signed-installer.pkg",
+ "downloadSize": "177373751",
+ "sha256": "e7ffb56b99f25de7c71774e6e6484c1e400ebe2fa05b9618695030e83a5de9a2"
+ }
+ },
{
"version": "2.0.2",
"changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
{
"app_name": "JackTrip",
"releases": [
+ {
+ "version": "2.1.0",
+ "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+ "download": {
+ "date": "2023-11-06T00:00:00Z",
+ "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Windows-x64-signed-installer.msi",
+ "downloadSize": "108511232",
+ "sha256": "cd5b4735421a484bf83635f07653755e56b095c785f021eedaa4ca2d4132dd7f"
+ }
+ },
{
"version": "2.0.2",
"changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
, mBitResolutionMode(AudioBitResolution)
, mSampleRate(gDefaultSampleRate)
, mBufferSizeInSamples(gDefaultBufferSizeInSamples)
+ , mMonitorQueuePtr(NULL)
, mAudioInputPacket(NULL)
, mAudioOutputPacket(NULL)
, mLoopBack(false)
, mProcessWithNetwork(processWithNetwork)
+ , mMonitorStarted(false)
, mJackTrip(jacktrip)
, mInputMixMode(InputMixMode)
, mProcessingAudio(false)
{
-#ifndef WAIR
- // cc
- // Initialize and assign memory for ProcessPlugins Buffers
- int monitorChans = int(std::min<size_t>(mInputChans.size(), mOutputChans.size()));
- mInProcessBuffer.resize(mInputChans.size());
- mOutProcessBuffer.resize(mOutputChans.size());
- mMonProcessBuffer.resize(monitorChans);
- // Set pointer to NULL
- for (int i = 0; i < mInProcessBuffer.size(); i++) {
- mInProcessBuffer[i] = NULL;
- }
- for (int i = 0; i < mOutProcessBuffer.size(); i++) {
- mOutProcessBuffer[i] = NULL;
- }
- for (int i = 0; i < monitorChans; i++) {
- mMonProcessBuffer[i] = NULL;
- }
-#else // WAIR
- int iCnt =
- (mInputChans.size() > mNumNetRevChans) ? mInputChans.size() : mNumNetRevChans;
- int oCnt =
- (mOutputChans.size() > mNumNetRevChans) ? mOutputChans.size() : mNumNetRevChans;
- int aCnt = (mNumNetRevChans) ? mInputChans.size() : 0;
- int mCnt = std::min<int>(iCnt, oCnt);
- for (int i = 0; i < iCnt; i++) {
- mInProcessBuffer[i] = NULL;
- }
- for (int i = 0; i < oCnt; i++) {
- mOutProcessBuffer[i] = NULL;
- }
- for (int i = 0; i < mCnt; i++) {
- mMonProcessBuffer[i] = NULL;
- }
- for (int i = 0; i < aCnt; i++) {
- mAPInBuffer[i] = NULL;
- }
-#endif // endwhere
-
- mInBufCopy.resize(mInputChans.size());
- for (int i = 0; i < mInputChans.size(); i++) {
- mInBufCopy[i] =
- new sample_t[MAX_AUDIO_BUFFER_SIZE]; // required for processing audio input
- }
-
- // Not used in this class but may be needed by subclasses
- mNumInChans = mInputChans.size();
- mNumOutChans = mOutputChans.size();
}
//*******************************************************************************
{
delete[] mAudioInputPacket;
delete[] mAudioOutputPacket;
-#ifndef WAIR // NOT WAIR:
for (int i = 0; i < mInProcessBuffer.size(); i++) {
delete[] mInProcessBuffer[i];
}
-
for (int i = 0; i < mOutProcessBuffer.size(); i++) {
delete[] mOutProcessBuffer[i];
}
- for (int i = 0; i < mMonProcessBuffer.size(); i++) {
- delete[] mMonProcessBuffer[i];
- }
-#else // WAIR
- for (int i = 0; i < mInProcessBuffer.size(); i++) {
- delete[] mInProcessBuffer[i];
- }
- for (int i = 0; i < mOutProcessBuffer.size(); i++) {
- delete[] mOutProcessBuffer[i];
- }
- for (int i = 0; i < mMonProcessBuffer.size(); i++) {
- delete[] mMonProcessBuffer[i];
- }
+ delete mMonitorQueuePtr;
+#ifdef WAIR // WAIR
for (int i = 0; i < mAPInBuffer.size(); i++) {
delete[] mAPInBuffer[i];
}
i->disconnect();
delete i;
}
- for (int i = 0; i < mInBufCopy.size(); i++) {
- delete[] mInBufCopy[i];
- }
}
//*******************************************************************************
void AudioInterface::setup(bool /*verbose*/)
{
- int nChansIn = mInputChans.size();
- int nChansOut = mOutputChans.size();
- int nChansMon = std::min<int>(
- nChansIn, nChansOut); // Note: Should be 2 when mixing stereo-to-mono
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
- }
- if (inputMixMode == MONO) {
- nChansMon = nChansOut;
- }
// Allocate buffer memory to read and write
mSizeInBytesPerChannel = getSizeInBytesPerChannel();
-
- int size_audio_input = int(mSizeInBytesPerChannel * nChansIn);
- int size_audio_output = int(mSizeInBytesPerChannel * nChansOut);
+ int nframes = getBufferSizeInSamples();
+ int size_audio_input = int(mSizeInBytesPerChannel * mInputChans.size());
+ int size_audio_output = int(mSizeInBytesPerChannel * mOutputChans.size());
#ifdef WAIR // WAIR
if (mNumNetRevChans) // else don't change sizes
{
size_audio_output = mSizeInBytesPerChannel * mNumNetRevChans;
}
#endif // endwhere
- mAudioInputPacket = new int8_t[size_audio_input];
- mAudioOutputPacket = new int8_t[size_audio_output];
+ const size_t audioInputPacketSize = std::max<size_t>(
+ size_audio_input, mInputChans.size() * sizeof(sample_t) * nframes);
+ const size_t audioOutputPacketSize = std::max<size_t>(
+ size_audio_output, mOutputChans.size() * sizeof(sample_t) * nframes);
+ mAudioInputPacket = new int8_t[audioInputPacketSize];
+ mAudioOutputPacket = new int8_t[audioOutputPacketSize];
// Initialize and assign memory for ProcessPlugins Buffers
#ifdef WAIR // WAIR
if (mNumNetRevChans) {
mInProcessBuffer.resize(mNumNetRevChans);
mOutProcessBuffer.resize(mNumNetRevChans);
- mMonProcessBuffer.resize(mNumNetRevChans);
- mAPInBuffer.resize(nChansIn);
+ mAPInBuffer.resize(mInputChans.size());
mNetInBuffer.resize(mNumNetRevChans);
+ for (int i = 0; i < mAPInBuffer.size(); i++) {
+ mAPInBuffer[i] = new sample_t[nframes];
+ // set memory to 0
+ std::memset(mAPInBuffer[i], 0, sizeof(sample_t) * nframes);
+ }
+ for (int i = 0; i < mNumNetRevChans; i++) {
+ mNetInBuffer[i] = new sample_t[nframes];
+ // set memory to 0
+ std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * nframes);
+ }
} else // don't change sizes
#endif // endwhere
{
- mInProcessBuffer.resize(nChansIn);
- mOutProcessBuffer.resize(nChansOut);
- mMonProcessBuffer.resize(nChansMon);
+ mInProcessBuffer.resize(mInputChans.size());
+ mOutProcessBuffer.resize(mOutputChans.size());
+ mMonitorQueuePtr = new WaitFreeFrameBuffer<64>(audioInputPacketSize);
}
- int nframes = getBufferSizeInSamples();
-
-#ifndef WAIR // NOT WAIR:
- for (int i = 0; i < nChansIn; i++) {
- mInProcessBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mInProcessBuffer[i], 0, sizeof(sample_t) * nframes);
- }
- for (int i = 0; i < nChansOut; i++) {
- mOutProcessBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * nframes);
- }
- for (int i = 0; i < nChansMon; i++) {
- mMonProcessBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mMonProcessBuffer[i], 0, sizeof(sample_t) * nframes);
- }
-#else // WAIR
- for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansIn); i++) {
+ for (int i = 0; i < mInputChans.size(); i++) {
mInProcessBuffer[i] = new sample_t[nframes];
// set memory to 0
std::memset(mInProcessBuffer[i], 0, sizeof(sample_t) * nframes);
}
- for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansOut); i++) {
+ for (int i = 0; i < mOutputChans.size(); i++) {
mOutProcessBuffer[i] = new sample_t[nframes];
// set memory to 0
std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * nframes);
}
- for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansMon); i++) {
- mMonProcessBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mMonitorProcess[i], 0, sizeof(sample_t) * nframes);
- }
- for (int i = 0; i < ((mNumNetRevChans) ? nChansIn : 0); i++) {
- mAPInBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mAPInBuffer[i], 0, sizeof(sample_t) * nframes);
- }
- for (int i = 0; i < mNumNetRevChans; i++) {
- mNetInBuffer[i] = new sample_t[nframes];
- // set memory to 0
- std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * nframes);
- }
-#endif // endwhere
}
//*******************************************************************************
QVarLengthArray<sample_t*>& out_buffer,
unsigned int n_frames)
{
- int nChansIn = mInputChans.size();
- int nChansOut = mOutputChans.size();
- int nChansMon = std::min<int>(
- nChansIn, nChansOut); // Note: Should be 2 when mixing stereo-to-mono
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
+ this->audioInputCallback(in_buffer, n_frames);
+ this->audioOutputCallback(out_buffer, n_frames);
+}
+
+//*******************************************************************************
+void AudioInterface::audioInputCallback(QVarLengthArray<sample_t*>& in_buffer,
+ unsigned int n_frames)
+{
+ // in_buffer is "in" from local audio hardware
+ if (getBufferSizeInSamples() < n_frames) { // buffers are sized in setup()
+ std::cerr << "*** AudioInterface::audioInputCallback n_frames = " << n_frames
+ << " larger than expected = " << getBufferSizeInSamples() << "\n";
+ exit(1);
}
- if (inputMixMode == MONO) {
- nChansMon = nChansOut;
+
+#ifndef WAIR
+ if (mMonitorQueuePtr != nullptr && mProcessPluginsToMonitor.size() > 0) {
+ // copy audio input to monitor queue
+ for (int i = 0; i < mInputChans.size(); i++) {
+ int8_t* sample_ptr = mAudioInputPacket + (i * sizeof(sample_t) * n_frames);
+ std::memcpy(sample_ptr, in_buffer[i], sizeof(sample_t) * n_frames);
+ }
+ mMonitorQueuePtr->push(mAudioInputPacket);
}
- // Allocate the Process Callback
- //-------------------------------------------------------------------
+#endif // not WAIR
+
+ // process incoming signal from audio interface using process plugins
+ for (auto* p : qAsConst(mProcessPluginsToNetwork)) {
+ if (p->getInited()) {
+ p->compute(n_frames, in_buffer.data(), in_buffer.data());
+ }
+ }
+
+ // add audio testing impulse, if enabled
+ if (mAudioTesterP && mAudioTesterP->getEnabled()) {
+ mAudioTesterP->writeImpulse(
+ in_buffer,
+ n_frames); // writes last channel of in_buffer with test impulse
+ }
+
+ // send the final signal to the network
+ if (mProcessWithNetwork) {
+ computeProcessToNetwork(in_buffer, n_frames);
+ }
+}
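The monitor path added above is a single-producer/single-consumer handoff between the two callbacks: audioInputCallback() pushes raw input frames into mMonitorQueuePtr, and audioOutputCallback() below pops and mixes them into the hardware output. A minimal standalone sketch of the pattern, assuming only the WaitFreeFrameBuffer push()/pop() interface used in this patch (all other names are hypothetical):

    #include <cstdint>
    #include <cstring>

    #include "WaitFreeFrameBuffer.h"  // project header, as used in this patch

    constexpr size_t frameBytes = 2 * sizeof(float) * 128;  // e.g. stereo, 128 frames
    static WaitFreeFrameBuffer<64> monitorQueue(frameBytes);

    void onAudioInput(int8_t* frame)  // producer: input device thread
    {
        monitorQueue.push(frame);
    }

    void onAudioOutput(int8_t* frame)  // consumer: output device thread
    {
        // pop() returns false when no frame is ready; emit silence instead
        if (!monitorQueue.pop(frame))
            std::memset(frame, 0, frameBytes);
    }

Because the queue is wait-free, neither real-time thread can block the other, which is what lets the input and output streams run on separate devices in the RtAudioInterface changes further down.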
+
+//*******************************************************************************
+void AudioInterface::audioOutputCallback(QVarLengthArray<sample_t*>& out_buffer,
+ unsigned int n_frames)
+{
+ // in_buffer is "in" from local audio hardware
+ if (getBufferSizeInSamples() < n_frames) { // allocated in constructor above
+ std::cerr << "*** AudioInterface::audioOutputCallback n_frames = " << n_frames
+ << " larger than expected = " << getBufferSizeInSamples() << "\n";
+ exit(1);
+ }
+
// 1) First, process incoming packets
- // ----------------------------------
#ifdef WAIR // WAIR
// qDebug() << "--" << mProcessPluginsFromNetwork.size();
#define COMBDSP 1 // client
#define APDSP 0 // client
#define DCBDSP 0 // server
- for (int i = 0; i < mNumNetRevChans; i++) {
+ for (int i = 0; i < mNetInBuffer.size(); i++) {
std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * n_frames);
}
#endif // endwhere
// ==== RECEIVE AUDIO CHANNELS FROM NETWORK ====
+ // out_buffer is from the network and goes "out" to local audio hardware
if (mProcessWithNetwork) {
computeProcessFromNetwork(out_buffer, n_frames);
}
// =============================================
- // out_buffer is from the network and goes "out" to local audio
- // hardware via JACK:
-
// mAudioTesterP will be nullptr for hub server's JackTripWorker instances
if (mAudioTesterP && mAudioTesterP->getEnabled()) {
mAudioTesterP->lookForReturnPulse(out_buffer, n_frames);
}
-#ifdef WAIR // WAIR
- // nib16 result now in mNetInBuffer
-#endif // endwhere
-
- // 2) Dynamically allocate ProcessPlugin processes
+ // apply process plugins to the signal
// -----------------------------------------------
// The processing will be done in order of allocation
/// \todo Implement for more than one process plugin, now it just works properly
p->compute(n_frames, out_buffer.data(), out_buffer.data());
}
}
-#else // WAIR:
+
+ if (mMonitorQueuePtr != nullptr && mProcessPluginsToMonitor.size() > 0) {
+ // mix in the monitor signal
+ // note that using memory_order_acquire ensures all data written to the buffers
+ // will also be available to this thread before it is read
+ std::memset(mAudioOutputPacket, 0,
+ sizeof(sample_t) * n_frames * getNumInputChannels());
+ if (mMonitorStarted) {
+ mMonitorQueuePtr->pop(mAudioOutputPacket);
+ } else {
+ // drain the monitor queue to minimize latency
+ while (mMonitorQueuePtr->pop(mAudioOutputPacket)) {}
+ mMonitorStarted = true;
+ }
+ for (int i = 0; i < getNumOutputChannels(); i++) {
+ // if using mix-to-mono, in_buffer[0] should already contain the mixed
+ // audio, so copy it to the monitor buffer. See RtAudioInterface.cpp
+
+ // likewise if using mono, we simply copy the input to every monitor
+ // channel
+ int8_t* sample_ptr = mAudioOutputPacket;
+ if (i > 0 && getNumInputChannels() > i
+ && mInputMixMode == AudioInterface::STEREO) {
+ // otherwise, copy each channel individually
+ sample_ptr += (i * sizeof(sample_t) * n_frames);
+ }
+ std::memcpy(mOutProcessBuffer[i], sample_ptr, sizeof(sample_t) * n_frames);
+ }
+ for (int i = 0; i < mProcessPluginsToMonitor.size(); i++) {
+ ProcessPlugin* p = mProcessPluginsToMonitor[i];
+ if (p->getInited()) {
+ // note: for monitor plugins, the output is out_buffer (to the speakers)
+ p->compute(n_frames, mOutProcessBuffer.data(), out_buffer.data());
+ }
+ }
+ }
+
+#else // WAIR:
+ // nib16 result now in mNetInBuffer
+ int nChansIn = mInputChans.size();
+ int nChansOut = mOutputChans.size();
for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansOut); i++) {
std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * n_frames);
}
mOutProcessBuffer.data());
}
// compute cob16
-#endif // endwhere
- // 3) Send packets to network:
- // mAudioTesterP will be nullptr for hub server's JackTripWorker instances:
- bool audioTesting = (mAudioTesterP && mAudioTesterP->getEnabled());
- int nop = mProcessPluginsToNetwork.size(); // number of OUTGOING processing modules
- if (nop > 0 || audioTesting
- || mProcessPluginsToMonitor.size()
- > 0) { // cannot modify in_buffer, so make a copy
- // in_buffer is "in" from local audio hardware via JACK
- if (mInBufCopy.size() < nChansIn) { // created in constructor above
- std::cerr << "*** AudioInterface.cpp: Number of Input Channels changed - "
- "insufficient room reserved\n";
- exit(1);
- }
- if (MAX_AUDIO_BUFFER_SIZE < n_frames) { // allocated in constructor above
- std::cerr << "*** AudioInterface.cpp: n_frames = " << n_frames
- << " larger than expected max = " << MAX_AUDIO_BUFFER_SIZE << "\n";
- exit(1);
- }
- for (int i = 0; i < nChansIn; i++) {
- std::memcpy(mInBufCopy[i], in_buffer[i], sizeof(sample_t) * n_frames);
- }
- for (int i = 0; i < nop; i++) {
- // process all outgoing channels with ProcessPlugins:
- ProcessPlugin* p = mProcessPluginsToNetwork[i];
- if (p->getInited()) {
- p->compute(n_frames, mInBufCopy.data(), mInBufCopy.data());
- }
- }
-
- for (int i = 0; i < nChansMon; i++) {
- if ((mInputChans.size() == 2 && mInputMixMode == AudioInterface::MIXTOMONO)
- || (mInputMixMode == AudioInterface::MONO)) {
- // if using mix-to-mono, in_buffer[0] should already contain the mixed
- // audio, so copy it to the monitor buffer. See RtAudioInterface.cpp
-
- // likewise if using mono, we simply copy the input to every monitor
- // channel
- std::memcpy(mMonProcessBuffer[i], in_buffer[0],
- sizeof(sample_t) * n_frames);
- } else {
- // otherwise, copy each channel individually
- std::memcpy(mMonProcessBuffer[i], in_buffer[i],
- sizeof(sample_t) * n_frames);
- }
- }
- for (int i = 0; i < mProcessPluginsToMonitor.size(); i++) {
- ProcessPlugin* p = mProcessPluginsToMonitor[i];
- if (p->getInited()) {
- // note: for monitor plugins, the output is out_buffer (to the speakers)
- p->compute(n_frames, mMonProcessBuffer.data(), out_buffer.data());
- }
- }
-
- if (audioTesting) {
- mAudioTesterP->writeImpulse(
- mInBufCopy,
- n_frames); // writes last channel of mInBufCopy with test impulse
- }
- if (mProcessWithNetwork) {
- computeProcessToNetwork(mInBufCopy, n_frames);
- }
- } else { // copy saved if no plugins and no audio testing in progress:
- if (mProcessWithNetwork) {
- computeProcessToNetwork(
- in_buffer, n_frames); // send processed input audio to network - OUTGOING
- }
- }
-
-#ifdef WAIR // WAIR
// aib2 + cob16 to nob16
-#endif // endwhere
-#ifdef WAIR // WAIR
if (mNumNetRevChans) // else not wair, so skip all this
{
#define AP
mix_sample[j] += tmp_sample[j];
}
} // nib6 to aob2
-#else // AP
+#else // AP
// output through all-pass cascade
// AP2 is 2 channel, mixes inputs to mono, then splits to two parallel AP chains
#endif // AP
}
#endif // endwhere
-
- ///************PROTOTYPE FOR CELT**************************
- ///********************************************************
- /*
- CELTMode* mode;
- int* error;
- mode = celt_mode_create(48000, 2, 64, error);
- */
- // celt_mode_create(48000, 2, 64, NULL);
- // unsigned char* compressed;
- // CELTEncoder* celtEncoder;
- // celt_encode_float(celtEncoder, mInBuffer, NULL, compressed, );
-
- ///********************************************************
- ///********************************************************
}
//*******************************************************************************
void AudioInterface::broadcastCallback(QVarLengthArray<sample_t*>& mon_buffer,
unsigned int n_frames)
{
- int nChansOut = mOutputChans.size();
-
/// \todo cast *mInBuffer[i] to the bit resolution
// Output Process (from NETWORK to JACK)
// ----------------------------------------------------------------
// Read Audio buffer from RingBuffer (read from incoming packets)
mJackTrip->receiveBroadcastPacket(mAudioOutputPacket);
// Extract separate channels to send to Jack
- for (int i = 0; i < nChansOut; i++) {
+ for (int i = 0; i < mOutputChans.size(); i++) {
sample_t* tmp_sample = mon_buffer[i]; // sample buffer for channel i
for (unsigned int j = 0; j < n_frames; j++) {
// Change the bit resolution on each sample
fromBitToSampleConversion(
// use interleaved channel layout
//&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
- &mAudioOutputPacket[(j * mBitResolutionMode * nChansOut)
+ &mAudioOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
+ (i * mBitResolutionMode)],
&tmp_sample[j], mBitResolutionMode);
}
void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_buffer,
unsigned int n_frames)
{
- int nChansOut = mOutputChans.size();
-
/// \todo cast *mInBuffer[i] to the bit resolution
// Output Process (from NETWORK to JACK)
// ----------------------------------------------------------------
fromBitToSampleConversion(
// use interleaved channel layout
//&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
- &mOutputPacket[(j * mBitResolutionMode * nChansOut)
+ &mOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
+ (i * mBitResolutionMode)],
&tmp_sample[j], mBitResolutionMode);
}
#endif // endwhere
// Extract separate channels to send to Jack
- for (int i = 0; i < nChansOut; i++) {
+ for (int i = 0; i < mOutputChans.size(); i++) {
//--------
// This should be faster for 32 bits
// std::memcpy(mOutBuffer[i], &mOutputPacket[i*mSizeInBytesPerChannel],
fromBitToSampleConversion(
// use interleaved channel layout
//&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
- &mAudioOutputPacket[(j * mBitResolutionMode * nChansOut)
+ &mAudioOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
+ (i * mBitResolutionMode)],
&tmp_sample[j], mBitResolutionMode);
}
void AudioInterface::computeProcessToNetwork(QVarLengthArray<sample_t*>& in_buffer,
unsigned int n_frames)
{
- int nChansIn = mInputChans.size();
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
- }
+ const int nChansIn = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
// Input Process (from JACK to NETWORK)
// ----------------------------------------------------------------
// Concatenate all the channels from jack to form packet
return;
}
- int nChansIn = mInputChans.size();
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
- }
-
- int nTestChans = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
- int nPluginChans = nChansIn - nTestChans;
+ const int nChansIn = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
+ int nTestChans = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
+ int nPluginChans = nChansIn - nTestChans;
assert(nTestChans == 0 || (mAudioTesterP->getSendChannel() == nChansIn - 1));
if (plugin->getNumInputs() < nPluginChans) {
std::cerr
return;
}
- int nChansOut = mOutputChans.size();
-
int nTestChans = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
- int nPluginChans = nChansOut - nTestChans;
- assert(nTestChans == 0 || (mAudioTesterP->getSendChannel() == nChansOut - 1));
+ int nPluginChans = mOutputChans.size() - nTestChans;
+ assert(nTestChans == 0
+ || (mAudioTesterP->getSendChannel() == mOutputChans.size() - 1));
if (plugin->getNumOutputs() > nPluginChans) {
std::cerr
<< "*** AudioInterface.cpp: appendProcessPluginFromNetwork: ProcessPlugin "
if (!plugin) {
return;
}
- int nChansIn = mInputChans.size();
- int nChansOut = mOutputChans.size();
- int nChansMon = std::min<int>(
- nChansIn, nChansOut); // Note: Should be 2 when mixing stereo-to-mono
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
- }
- if (inputMixMode == MONO) {
- nChansMon = nChansOut;
- }
+
+ const int nChansMon = getNumMonChannels();
+
if (plugin->getNumInputs() > nChansMon) {
std::cerr
<< "*** AudioInterface.cpp: appendProcessPluginToMonitor: ProcessPlugin "
void AudioInterface::initPlugins(bool verbose)
{
- int nChansIn = mInputChans.size();
- int nChansOut = mOutputChans.size();
- int nChansMon = std::min<int>(
- nChansIn, nChansOut); // Note: Should be 2 when mixing stereo-to-mono
- inputMixModeT inputMixMode = mInputMixMode;
- if (inputMixMode == MIXTOMONO) {
- nChansIn = 1;
- }
- if (inputMixMode == MONO) {
- nChansMon = nChansOut;
- }
+ const int nChansIn = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
+ const int nChansOut = mOutputChans.size();
+ const int nChansMon = getNumMonChannels();
int nPlugins = mProcessPluginsFromNetwork.size() + mProcessPluginsToNetwork.size()
+ mProcessPluginsToMonitor.size();
if (nPlugins > 0) {
#include "AudioTester.h"
#include "ProcessPlugin.h"
+#include "WaitFreeFrameBuffer.h"
#include "jacktrip_types.h"
-//#include "jacktrip_globals.h"
// Forward declarations
class JackTrip;
#endif // endwhere
AudioInterface::audioBitResolutionT AudioBitResolution = AudioInterface::BIT16,
bool processWithNetwork = false, JackTrip* jacktrip = nullptr);
+
/// \brief The class destructor
virtual ~AudioInterface();
* method to ensure correct inizialization.
*/
virtual void setup(bool verbose = true);
+
/// \brief Tell the audio server that we are ready to roll. The
/// process-callback will start running. This runs on its own thread.
/// \return 0 on success, otherwise a non-zero error code
virtual int startProcess() = 0;
+
/// \brief Stops the process-callback thread
/// \return 0 on success, otherwise a non-zero error code
virtual int stopProcess() = 0;
- /** \brief Process callback. Subclass should call this callback after obtaining the
- in_buffer and out_buffer pointers.
- * \param in_buffer Array of input audio samplers for each channel. The user
- * is responsible to check that each channel has n_frames samplers
- * \param in_buffer Array of output audio samplers for each channel. The user
- * is responsible to check that each channel has n_frames samplers
- */
+
+ /** \brief Broadcast callback. Subclass should call this callback after
+ * obtaining the mon_buffer pointer.
+ *
+ * \param mon_buffer Array of monitor audio samples for each channel. The
+ * caller is responsible for checking that each channel has n_frames samples
+ */
virtual void broadcastCallback(QVarLengthArray<sample_t*>& mon_buffer,
unsigned int n_frames);
+
+ /** \brief Audio interface callback. Subclass should call this callback after
+ * obtaining the in_buffer and out_buffer pointers (for duplex mode).
+ *
+ * \param in_buffer Array of input audio samples for each channel. The
+ * caller is responsible for checking that each channel has n_frames samples
+ * \param out_buffer Array of output audio samples for each channel. The
+ * caller is responsible for checking that each channel has n_frames samples
+ */
virtual void callback(QVarLengthArray<sample_t*>& in_buffer,
QVarLengthArray<sample_t*>& out_buffer, unsigned int n_frames);
+
+ /** \brief Audio input process callback. Subclass should call this callback
+ * after obtaining the in_buffer pointer (for input only).
+ *
+ * \param in_buffer Array of input audio samples for each channel. The
+ * caller is responsible for checking that each channel has n_frames samples
+ */
+ virtual void audioInputCallback(QVarLengthArray<sample_t*>& in_buffer,
+ unsigned int n_frames);
+
+ /** \brief Audio output process callback. Subclass should call this callback
+ * after obtaining the out_buffer pointer (for output only).
+ *
+ * \param out_buffer Array of output audio samples for each channel. The
+ * caller is responsible for checking that each channel has n_frames samples
+ */
+ virtual void audioOutputCallback(QVarLengthArray<sample_t*>& out_buffer,
+ unsigned int n_frames);
+
/** \brief appendProcessPluginToNetwork(): Append a ProcessPlugin for outgoing audio.
* The processing order equals order they were appended.
* This processing is in the JackTrip client before sending to the network.
* <tt>std::tr1::shared_ptr<ProcessPluginName> loopback(new ProcessPluginName);</tt>
*/
virtual void appendProcessPluginToNetwork(ProcessPlugin* plugin);
+
/** \brief appendProcessPluginFromNetwork():
* Same as appendProcessPluginToNetwork() except that these plugins operate
* on the audio received from the network (typically from a JackTrip server).
* -> JackTrip client -> processPlugin from network -> JACK -> audio
*/
virtual void appendProcessPluginFromNetwork(ProcessPlugin* plugin);
+
/** \brief appendProcessPluginToMonitor():
* Appends plugins used for local monitoring
*/
virtual void appendProcessPluginToMonitor(ProcessPlugin* plugin);
+
/** \brief initPlugins():
* Initialize all ProcessPlugin modules.
* The audio sampling rate (mSampleRate) must be set at this time.
*/
void initPlugins(bool verbose = true);
+
virtual void connectDefaultPorts() = 0;
+
/** \brief Convert a 32bit number (sample_t) into one of the bit resolution
* supported (audioBitResolutionT).
*
static void fromSampleToBitConversion(
const sample_t* const input, int8_t* output,
const AudioInterface::audioBitResolutionT targetBitResolution);
+
/** \brief Convert a audioBitResolutionT bit resolution number into a
* 32bit number (sample_t)
*
virtual void setInputChannels(QVarLengthArray<int> inputChans)
{
mInputChans = inputChans;
- mNumInChans = inputChans.size();
}
virtual void setOutputChannels(QVarLengthArray<int> outputChans)
{
mOutputChans = outputChans;
- mNumOutChans = outputChans.size();
}
virtual void setInputMixMode(inputMixModeT mode) { mInputMixMode = mode; }
virtual void setSampleRate(uint32_t sample_rate) { mSampleRate = sample_rate; }
virtual int getNumInputChannels() const { return mInputChans.size(); }
/// \brief Get Number of Output Channels
virtual int getNumOutputChannels() const { return mOutputChans.size(); }
+ /// \brief Get Number of Monitor Channels
+ virtual int getNumMonChannels() const { return mOutputChans.size(); }
virtual QVarLengthArray<int> getInputChannels() const { return mInputChans; }
virtual QVarLengthArray<int> getOutputChannels() const { return mOutputChans; }
virtual inputMixModeT getInputMixMode() const { return mInputMixMode; }
QVarLengthArray<sample_t*>
mNetInBuffer; ///< Vector of Input buffers/channel read from net
QVarLengthArray<sample_t*>
- mAPInBuffer; ///< Vector of Input buffers/channel for AllPass input
-#endif // endwhere
- QVarLengthArray<sample_t*>
- mInBufCopy; ///< needed in callback() to modify JACK audio input
+ mAPInBuffer; ///< Vector of Input buffers/channel for AllPass input
+#endif // endwhere
int mAudioBitResolution; ///< Bit resolution in audio samples
AudioInterface::audioBitResolutionT
mBitResolutionMode; ///< Bit resolution (audioBitResolutionT) mode
mInProcessBuffer; ///< Vector of Input buffers/channel for ProcessPlugin
QVarLengthArray<sample_t*>
mOutProcessBuffer; ///< Vector of Output buffers/channel for ProcessPlugin
- QVarLengthArray<sample_t*>
- mMonProcessBuffer; ///< Vector of Monitor buffers/channel for ProcessPlugin
+ WaitFreeFrameBuffer<64>*
+ mMonitorQueuePtr; ///< Queue of audio frames from monitor signal
int8_t* mAudioInputPacket; ///< Packet containing all the channels to read from the
///< RingBuffer
int8_t* mAudioOutputPacket; ///< Packet containing all the channels to send to the
///< RingBuffer
bool mLoopBack;
bool mProcessWithNetwork; ///< whether or not to send/receive data via the network
+ bool mMonitorStarted; ///< True if we have started to consume monitor audio
AudioTester* mAudioTesterP{nullptr};
protected:
JackTrip* mJackTrip; ///< JackTrip Mediator Class pointer
- int mNumInChans; ///< Number of Input Channels
- int mNumOutChans; ///< Number of Output Channels
inputMixModeT mInputMixMode; ///< Input mixing mode
void setDevicesWarningMsg(warningMessageT msg);
mNumFrames = getBufferSizeInSamples();
// Initialize Buffer array to read and write audio
- mInBuffer.resize(mNumInChans);
- mOutBuffer.resize(mNumOutChans);
- mBroadcastBuffer.resize(mNumOutChans);
+ mInBuffer.resize(getNumInputChannels());
+ mOutBuffer.resize(getNumOutputChannels());
+ mBroadcastBuffer.resize(getNumOutputChannels());
}
//*******************************************************************************
void JackAudioInterface::createChannels()
{
// Create Input Ports
- mInPorts.resize(mNumInChans);
- for (int i = 0; i < mNumInChans; i++) {
+ mInPorts.resize(getNumInputChannels());
+ for (int i = 0; i < getNumInputChannels(); i++) {
QString inName;
QTextStream(&inName) << "send_" << i + 1;
mInPorts[i] =
}
// Create Output Ports
- mOutPorts.resize(mNumOutChans);
- for (int i = 0; i < mNumOutChans; i++) {
+ mOutPorts.resize(getNumOutputChannels());
+ for (int i = 0; i < getNumOutputChannels(); i++) {
QString outName;
QTextStream(&outName) << "receive_" << i + 1;
mOutPorts[i] =
}
// Create Broadcast Ports
if (mBroadcast) {
- mBroadcastPorts.resize(mNumOutChans);
- for (int i = 0; i < mNumOutChans; i++) {
+ mBroadcastPorts.resize(getNumOutputChannels());
+ for (int i = 0; i < getNumOutputChannels(); i++) {
QString outName;
QTextStream(&outName) << "broadcast_" << i + 1;
mBroadcastPorts[i] =
// Get input and output buffers from JACK
//-------------------------------------------------------------------
- for (int i = 0; i < mNumInChans; i++) {
+ for (int i = 0; i < getNumInputChannels(); i++) {
// Input Ports are READ ONLY and change as needed (no locks) - make a copy for
// debugging
mInBuffer[i] = (sample_t*)jack_port_get_buffer(mInPorts[i], nframes);
}
- for (int i = 0; i < mNumOutChans; i++) {
+ for (int i = 0; i < getNumOutputChannels(); i++) {
// Output Ports are WRITABLE
mOutBuffer[i] = (sample_t*)jack_port_get_buffer(mOutPorts[i], nframes);
}
AudioInterface::callback(mInBuffer, mOutBuffer, nframes);
if (mBroadcast) {
- for (int i = 0; i < mNumOutChans; i++) {
+ for (int i = 0; i < getNumOutputChannels(); i++) {
// Broadcast Ports are WRITABLE
mBroadcastBuffer[i] =
(sample_t*)jack_port_get_buffer(mBroadcastPorts[i], nframes);
cout << "WARNING: Cannot find any physical capture ports" << endl;
} else {
// Connect capture ports to jacktrip send
- for (int i = 0; i < mNumInChans; i++) {
+ for (int i = 0; i < getNumInputChannels(); i++) {
// Check that we don't run out of capture ports
if (ports[i] != NULL) {
jack_connect(mClient, ports[i], jack_port_name(mInPorts[i]));
cout << "WARNING: Cannot find any physical playback ports" << endl;
} else {
// Connect playback ports to jacktrip receive
- for (int i = 0; i < mNumOutChans; i++) {
+ for (int i = 0; i < getNumOutputChannels(); i++) {
// Check that we don't run out of playback ports
if (ports[i] != NULL) {
jack_connect(mClient, jack_port_name(mOutPorts[i]), ports[i]);
/// \brief Connect the default ports, capture to sends, and receives to playback
void connectDefaultPorts() override;
- /// \brief Get Number of Input Channels
- virtual int getNumInputChannels() const override { return mNumInChans; }
- /// \brief Get Number of Output Channels
- virtual int getNumOutputChannels() const override { return mNumOutChans; }
-
//--------------SETTERS---------------------------------------------
/// \brief Set Client Name to something different that the default (JackTrip)
virtual void setClientName(const QString& ClientName) override
using std::setw;
// constants...
-constexpr int HIST = 4; // for mono at FPP 16-128, see below for > mono, > 128
-constexpr int NumSlotsMax = 4096; // mNumSlots looped for recent arrivals
-constexpr double DefaultAutoHeadroom =
- 3.0; // msec padding for auto adjusting mMsecTolerance
-constexpr double AutoMax = 250.0; // msec bounds on insane IPI, like ethernet unplugged
-constexpr double AutoInitDur = 6000.0; // kick in auto after this many msec
+constexpr int HIST = 4; // for mono at FPP 16-128, see below for > mono, > 128
+constexpr int NumSlotsMax = 4096; // mNumSlots looped for recent arrivals
+constexpr double AutoMax = 250.0; // msec bounds on insane IPI, like ethernet unplugged
+constexpr double AutoInitDur = 3000.0; // kick in auto after this many msec
constexpr double AutoInitValFactor =
0.5; // scale for initial mMsecTolerance during init phase if unspecified
constexpr double MaxWaitTime = 30; // msec
// tweak
-constexpr int WindowDivisor = 8; // for faster auto tracking
-constexpr int MaxFPP = 1024; // tested up to this FPP
+constexpr int WindowDivisor = 8; // for faster auto tracking
+constexpr int MaxFPP = 1024; // tested up to this FPP
+constexpr int MaxAutoHeadroom = 5; // maximum auto headroom in milliseconds
+constexpr double AutoHeadroomGlitchTolerance =
+ 0.007; // Acceptable rate of glitches before auto headroom is increased (0.7%)
+constexpr double AutoHistoryWindow =
+ 60; // rolling window of time (in seconds) over which auto tolerance roughly adjusts
+constexpr double AutoSmoothingFactor =
+ 1.0
+ / (WindowDivisor * AutoHistoryWindow); // EWMA smoothing factor for auto tolerance
+
//*******************************************************************************
Regulator::Regulator(int rcvChannels, int bit_res, int FPP, int qLen, int bqLen,
int sample_rate)
, pushStat(NULL)
, pullStat(NULL)
, mAuto(false)
+ , mSkipAutoHeadroom(true)
+ , mLastGlitches(0)
+ , mCurrentHeadroom(0)
, mUseWorkerThread(false)
, m_b_BroadcastQueueLength(bqLen)
, mRegulatorThreadPtr(NULL)
mFPPratioIsSet = false;
mBytesPeerPacket = mBytes;
mPeerFPP = mFPP; // use local until first packet arrives
- mAutoHeadroom = DefaultAutoHeadroom;
+ mAutoHeadroom = 3.0;
mFPPdurMsec = 1000.0 * mFPP / mSampleRate;
changeGlobal_2(NumSlotsMax); // need hg if running GUI
if (m_b_BroadcastQueueLength) {
delete m_b_BroadcastRingBuffer;
}
+//*******************************************************************************
+void Regulator::updateTolerance()
+{
+ // pushes happen when we have new packets received from peer
+ // pulls happen when our audio interface triggers a callback
+ const double pushStatTol = pushStat->calcAuto();
+ const double pullStatTol = pullStat->calcAuto();
+ if (mAutoHeadroom < 0) {
+ // auto headroom calculation: use value calculated by pullStats
+ // because that is where it counts glitches in the incoming peer stream
+ const int glitchesAllowed =
+ static_cast<int>(AutoHeadroomGlitchTolerance * mSampleRate / mPeerFPP);
+ const int totalGlitches = pullStat->plcUnderruns + pullStat->plcOverruns;
+ const int newGlitches = totalGlitches - mLastGlitches;
+ mLastGlitches = totalGlitches;
+ // require two consecutive periods of glitches exceeding the allowed threshold
+ if (newGlitches > glitchesAllowed && mCurrentHeadroom < MaxAutoHeadroom) {
+ if (mSkipAutoHeadroom) {
+ mSkipAutoHeadroom = false;
+ } else {
+ mSkipAutoHeadroom = true;
+ ++mCurrentHeadroom;
+ qDebug() << "PLC" << newGlitches << "glitches"
+ << ">" << glitchesAllowed << "allowed: Increasing headroom to "
+ << mCurrentHeadroom;
+ }
+ } else {
+ mSkipAutoHeadroom = true;
+ }
+ } else {
+ mCurrentHeadroom = mAutoHeadroom;
+ }
+ double tmp = std::max<double>(pushStatTol + mCurrentHeadroom, pullStatTol);
+ if (tmp > AutoMax)
+ tmp = AutoMax;
+ if (tmp < mFPPdurMsec)
+ tmp = mFPPdurMsec;
+ if (tmp < mPeerFPPdurMsec)
+ tmp = mPeerFPPdurMsec;
+ mMsecTolerance = tmp;
+}
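To make the threshold above concrete: with illustrative values of a 48 kHz sample rate and 128-sample peer packets (not taken from this patch), the per-update glitch allowance is

    // glitchesAllowed = AutoHeadroomGlitchTolerance * mSampleRate / mPeerFPP
    const int glitchesAllowed = static_cast<int>(0.007 * 48000.0 / 128);  // = 2

Combined with the mSkipAutoHeadroom flip-flop, and with updateTolerance() gated to run about once per second (see the WindowDivisor check at its call site), headroom therefore grows by 1 ms only after two consecutive update periods each containing three or more new glitches, and never beyond MaxAutoHeadroom (5 ms).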
+
+//*******************************************************************************
void Regulator::setFPPratio()
{
if (mPeerFPP != mFPP) {
mAuto = true;
// default is -500 from bufstrategy 1 autoq mode
// use mMsecTolerance to set headroom
- mAutoHeadroom =
- (mMsecTolerance == -500.0) ? DefaultAutoHeadroom : -mMsecTolerance;
- qDebug() << "PLC is in auto mode and has been set with" << mAutoHeadroom
- << "ms headroom";
- if (mAutoHeadroom > 50.0)
- qDebug() << "That's a very large value and should be less than, "
- "for example, 50ms";
+ if (mMsecTolerance == -500.0) {
+ mAutoHeadroom = -1;
+ qDebug()
+ << "PLC is in auto mode and has been set with variable headroom";
+ } else {
+ mAutoHeadroom = -mMsecTolerance;
+ qDebug() << "PLC is in auto mode and has been set with"
+ << mAutoHeadroom << "ms headroom";
+ if (mAutoHeadroom > 50.0)
+ qDebug() << "That's a very large value and should be less than, "
+ "for example, 50ms";
+ }
// found an interesting relationship between mPeerFPP and initial
// mMsecTolerance mPeerFPP*0.5 is pretty good though that's an oddball
// conversion of bufsize directly to msec
};
setFPPratio();
// number of stats tick calls per sec depends on FPP
- int maxFPP = (mPeerFPP > mFPP) ? mPeerFPP : mFPP;
- pushStat = new StdDev(1, &mIncomingTimer,
- (int)(floor(mSampleRate / (double)maxFPP)));
+ pushStat = new StdDev(1, &mIncomingTimer,
+ (int)(floor(mSampleRate / (double)mPeerFPP)));
pullStat =
new StdDev(2, &mIncomingTimer, (int)(floor(mSampleRate / (double)mFPP)));
mFPPratioIsSet = true;
seq_num++;
}
}
- pushStat->tick();
- if (mAuto && (pushStat->lastTime > AutoInitDur)) {
- // use max to accomodate for bad clocks in audio interfaces that
- // cause a wide range of callback intervals (like realtek at 11ms)
- mMsecTolerance = std::max<double>(
- pushStat->calcAuto(mAutoHeadroom, mFPPdurMsec, mPeerFPPdurMsec),
- pullStat->calcAuto(mAutoHeadroom, mFPPdurMsec, mPeerFPPdurMsec));
+ bool pushStatsUpdated = pushStat->tick();
+ if (mAuto && pushStatsUpdated && (pushStat->lastTime > AutoInitDur)
+ && pushStat->longTermCnt % WindowDivisor == 0) {
+ // after AutoInitDur: update auto tolerance once per second
+ updateTolerance();
}
}
};
int next = lastSeqNumIn - i;
if (next < 0)
next += mNumSlots;
- if (mFPPratioNumerator) {
+ if (mFPPratioNumerator > 1) {
// time for assembly has passed; reset for next time
mAssemblyCounts[next] = 0;
}
lastMax = 0.0;
longTermMax = 0.0;
longTermMaxAcc = 0.0;
- longTermMean = 0.0;
lastTime = 0.0;
lastPLCdspElapsed = 0.0;
lastPlcOverruns = 0;
max = -999999.0;
};
-double StdDev::calcAuto(double autoHeadroom, double localFPPdur, double peerFPPdur)
+double StdDev::calcAuto()
{
// qDebug() << longTermStdDev << longTermMax << AutoMax << window <<
// longTermCnt;
if ((longTermStdDev == 0.0) || (longTermMax == 0.0))
return AutoMax;
double tmp = longTermStdDev + ((longTermMax > AutoMax) ? AutoMax : longTermMax);
- if (tmp > AutoMax)
- tmp = AutoMax;
- if (tmp < localFPPdur)
- tmp = localFPPdur;
- if (tmp < peerFPPdur)
- tmp = peerFPPdur;
- tmp += autoHeadroom;
return tmp;
};
-void StdDev::tick()
+double StdDev::smooth(double avg, double current)
+{
+ // use exponential weighted moving average (EWMA) for long term calculations
+ // See https://en.wikipedia.org/wiki/Exponential_smoothing
+ return avg + AutoSmoothingFactor * (current - avg);
+}
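A quick sanity check on the smoothing factor defined earlier: AutoSmoothingFactor = 1 / (WindowDivisor * AutoHistoryWindow) = 1 / (8 * 60) ≈ 0.00208, so with stats updating eight times per second the EWMA settles over roughly one 60-second AutoHistoryWindow. A standalone sketch of that response:

    // After one full history window (480 updates), a unit step fed into
    // smooth() is ~63% absorbed, the familiar 1 - 1/e response of an EWMA.
    double avg = 0.0;
    for (int i = 0; i < 8 * 60; ++i)
        avg = avg + (1.0 / 480.0) * (1.0 - avg);  // i.e. smooth(avg, 1.0)
    // avg is now ~0.632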
+
+bool StdDev::tick()
{
double now = (double)mTimer->nsecsElapsed() / 1000000.0;
double msElapsed = now - lastTime;
lastTime = now;
+
// discard measurements that exceed the max wait time
// this prevents temporary outages from skewing jitter metrics
if (msElapsed > MaxWaitTime)
- return;
+ return false;
+
if (ctr != window) {
data[ctr] = msElapsed;
if (msElapsed < min)
std::cout << setw(10) << msElapsed << " " << mId << endl;
}
*/
- } else {
- // calculate mean and standard deviation
- mean = (double)acc / (double)window;
- double var = 0.0;
- for (int i = 0; i < window; i++) {
- double tmp = data[i] - mean;
- var += (tmp * tmp);
+ return false;
+ }
+
+ // calculate mean and standard deviation
+ mean = (double)acc / (double)window;
+ double var = 0.0;
+ for (int i = 0; i < window; i++) {
+ double tmp = data[i] - mean;
+ var += (tmp * tmp);
+ }
+ var /= (double)window;
+ double stdDevTmp = sqrt(var);
+
+ if (longTermCnt <= 3) {
+ if (longTermCnt == 0 && gVerboseFlag) {
+ cout << "printing directly from Regulator->stdDev->tick:\n (mean / min / "
+ "max / "
+ "stdDev / longTermMax / longTermStdDev) \n";
}
- var /= (double)window;
- double stdDevTmp = sqrt(var);
-
- if (longTermCnt <= 1) {
- if (longTermCnt == 0 && gVerboseFlag) {
- cout << "printing directly from Regulator->stdDev->tick:\n (mean / min / "
- "max / "
- "stdDev / longTermMean / longTermMax / longTermStdDev) \n";
- }
- // ignore first stats because they will be really unreliable
- longTermMax = max;
- longTermMaxAcc = max;
- longTermMean = mean;
- longTermStdDev = stdDevTmp;
- longTermStdDevAcc = stdDevTmp;
+ // ignore first few stats because they are unreliable
+ longTermMax = max;
+ longTermMaxAcc = max;
+ longTermStdDev = stdDevTmp;
+ longTermStdDevAcc = stdDevTmp;
+ } else {
+ longTermStdDevAcc += stdDevTmp;
+ longTermMaxAcc += max;
+ if (longTermCnt <= (WindowDivisor * AutoHistoryWindow)) {
+ // use simple average for startup to establish baseline
+ longTermStdDev = longTermStdDevAcc / (longTermCnt - 3);
+ longTermMax = longTermMaxAcc / (longTermCnt - 3);
} else {
- longTermStdDevAcc += stdDevTmp;
- longTermMaxAcc += max;
- longTermStdDev = longTermStdDevAcc / (double)longTermCnt;
- longTermMax = longTermMaxAcc / (double)longTermCnt;
- longTermMean = longTermMean / (double)longTermCnt;
- }
-
- if (gVerboseFlag) {
- cout << setw(10) << mean << setw(10) << min << setw(10) << max << setw(10)
- << stdDevTmp << setw(10) << longTermMean << setw(10) << longTermMax
- << setw(10) << longTermStdDev << " " << mId << endl;
+ // use EWMA after startup to allow for adjustments
+ longTermStdDev = smooth(longTermStdDev, stdDevTmp);
+ longTermMax = smooth(longTermMax, max);
}
+ }
- longTermCnt++;
- lastMean = mean;
- lastMin = min;
- lastMax = max;
- lastStdDev = stdDevTmp;
- reset();
+ if (gVerboseFlag) {
+ cout << setw(10) << mean << setw(10) << min << setw(10) << max << setw(10)
+ << stdDevTmp << setw(10) << longTermMax << setw(10) << longTermStdDev << " "
+ << mId << endl;
}
+
+ longTermCnt++;
+ lastMean = mean;
+ lastMin = min;
+ lastMax = max;
+ lastStdDev = stdDevTmp;
+ reset();
+ return true;
}
void Regulator::readSlotNonBlocking(int8_t* ptrToReadSlot)
{
public:
StdDev(int id, QElapsedTimer* timer, int w);
- void tick();
- double calcAuto(double autoHeadroom, double localFPPdur, double peerFPPdur);
+ bool tick(); // returns true if stats were updated
+ double calcAuto();
int mId;
int plcOverruns;
int plcUnderruns;
double longTermStdDevAcc;
double longTermMax;
double longTermMaxAcc;
- double longTermMean;
+ int longTermCnt;
private:
+ double smooth(double avg, double current);
void reset();
QElapsedTimer* mTimer;
std::vector<double> data;
double min;
double max;
int ctr;
- int longTermCnt;
};
class Regulator : public RingBuffer
void pushPacket(const int8_t* buf, int seq_num);
void assemblePacket(const int8_t* buf, int peer_seq_num);
void pullPacket();
+ void updateTolerance();
void setFPPratio();
- bool mFPPratioIsSet;
void processPacket(bool glitch);
void processChannel(int ch, bool glitch, int packetCnt, bool lastWasGlitch);
+
+ bool mFPPratioIsSet;
int mNumChannels;
int mAudioBitRes;
int mFPP;
int mFPPratioNumerator;
int mFPPratioDenominator;
bool mAuto;
+ bool mSkipAutoHeadroom;
+ int mLastGlitches;
+ double mCurrentHeadroom;
double mAutoHeadroom;
double mFPPdurMsec;
double mPeerFPPdurMsec;
#include "RtAudioInterface.h"
#include <QString>
+#include <QTextStream>
#include <cstdlib>
#include "JackTrip.h"
#endif
}
+//*******************************************************************************
+bool RtAudioDevice::checkSampleRate(unsigned int srate) const
+{
+ for (unsigned int i = 0; i < this->sampleRates.size(); i++) {
+ if (this->sampleRates[i] == srate)
+ return true;
+ }
+ return false;
+}
+
//*******************************************************************************
RtAudioDevice& RtAudioDevice::operator=(const RtAudio::DeviceInfo& info)
{
// Locate the selected input audio device
auto in_name = getInputDevice();
if (in_name.empty()) {
- mRtAudio.reset(new RtAudio);
- long default_device_id = getDefaultDevice(*mRtAudio, true);
+ long default_device_id = getDefaultDevice(true);
if (!getDeviceInfoFromId(default_device_id, in_device, true))
throw std::runtime_error("default input device not found");
cout << "Selected default INPUT device" << endl;
throw std::runtime_error("Requested input device \"" + in_name
+ "\" not found.");
}
- mRtAudio.reset(new RtAudio(in_device.api));
cout << "Selected INPUT device " << in_name << endl;
}
// Locate the selected output audio device
auto out_name = getOutputDevice();
if (out_name.empty()) {
- long default_device_id = getDefaultDevice(*mRtAudio, false);
+ long default_device_id = getDefaultDevice(false);
if (!getDeviceInfoFromId(default_device_id, out_device, false))
throw std::runtime_error("default output device not found");
cout << "Selected default OUTPUT device" << endl;
cout << "Selected OUTPUT device " << out_name << endl;
}
+ if (in_device.ID == out_device.ID) {
+ mRtAudioInput.reset(new RtAudio(in_device.api));
+ mRtAudioOutput.reset();
+ mDuplexMode = true;
+ } else {
+ mRtAudioInput.reset(new RtAudio(in_device.api));
+ mRtAudioOutput.reset(new RtAudio(out_device.api));
+ mDuplexMode = false;
+ }
+
if (in_chans_base + in_chans_num > in_device.inputChannels) {
in_chans_base = 0;
in_chans_num = 2;
}
if (verbose) {
- cout << "INPUT DEVICE:" << endl;
+ if (mDuplexMode) {
+ cout << "DUPLEX DEVICE:" << endl;
+ } else {
+ cout << "INPUT DEVICE:" << endl;
+ }
in_device.printVerbose();
cout << gPrintSeparator << endl;
+ if (!mDuplexMode) {
+ cout << "OUTPUT DEVICE:" << endl;
+ out_device.printVerbose();
+ cout << gPrintSeparator << endl;
+ }
+ }
- cout << "OUTPUT DEVICE:" << endl;
- out_device.printVerbose();
- cout << gPrintSeparator << endl;
+ if (!in_device.checkSampleRate(getSampleRate())) {
+ QString errorMsg;
+ QTextStream(&errorMsg) << "Input device \"" << QString::fromStdString(in_name)
+ << "\" does not support sample rate of "
+ << getSampleRate();
+ throw std::runtime_error(errorMsg.toStdString());
+ }
+ if (!out_device.checkSampleRate(getSampleRate())) {
+ QString errorMsg;
+ QTextStream(&errorMsg) << "Output device \"" << QString::fromStdString(out_name)
+ << "\" does not support sample rate of "
+ << getSampleRate();
+ throw std::runtime_error(errorMsg.toStdString());
}
if (in_device.api == out_device.api) {
errorCallback(type, errorText, nullptr);
};
try {
- mRtAudio->openStream(&out_params, &in_params, RTAUDIO_FLOAT32, sampleRate,
- &bufferFrames, &RtAudioInterface::wrapperRtAudioCallback,
- this, &options, errorFunc);
+ if (mDuplexMode) {
+ mRtAudioInput->openStream(
+ &out_params, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+ } else {
+ mRtAudioInput->openStream(
+ nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+ const unsigned int inputBufferFrames = bufferFrames;
+ mRtAudioOutput->openStream(
+ &out_params, nullptr, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+ if (inputBufferFrames != bufferFrames) {
+ // output device doesn't support the same buffer size
+ // try to reopen the input device with new size
+ const unsigned int outputBufferFrames = bufferFrames;
+ mRtAudioInput->closeStream();
+ mRtAudioInput->openStream(
+ nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+ if (outputBufferFrames != bufferFrames) {
+ // just give up if this still doesn't work
+ errorText = "The two devices selected are incompatible";
+ }
+ }
+ }
} catch (RtAudioError& e) {
errorText = e.getMessage();
}
const std::string& errorText) {
errorCallback(type, errorText, this);
};
- mRtAudio->setErrorCallback(errorFunc);
- if (RTAUDIO_NO_ERROR
- != mRtAudio->openStream(&out_params, &in_params, RTAUDIO_FLOAT32, sampleRate,
- &bufferFrames, &RtAudioInterface::wrapperRtAudioCallback,
- this, &options)) {
- errorText = mRtAudio->getErrorText();
+ mRtAudioInput->setErrorCallback(errorFunc);
+ if (mDuplexMode) {
+ if (RTAUDIO_NO_ERROR
+ != mRtAudioInput->openStream(
+ &out_params, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+ errorText = mRtAudioInput->getErrorText();
+ }
+ } else {
+ mRtAudioOutput->setErrorCallback(errorFunc);
+ if (RTAUDIO_NO_ERROR
+ != mRtAudioInput->openStream(
+ nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+ errorText = mRtAudioInput->getErrorText();
+ } else {
+ const unsigned int inputBufferFrames = bufferFrames;
+ if (RTAUDIO_NO_ERROR
+ != mRtAudioOutput->openStream(
+ &out_params, nullptr, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+ errorText = mRtAudioOutput->getErrorText();
+ } else if (inputBufferFrames != bufferFrames) {
+ // output device doesn't support the same buffer size
+ // try to reopen the input device with new size
+ const unsigned int outputBufferFrames = bufferFrames;
+ mRtAudioInput->closeStream();
+ if (RTAUDIO_NO_ERROR
+ != mRtAudioInput->openStream(
+ nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+ &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+ errorText = mRtAudioInput->getErrorText();
+ } else if (outputBufferFrames != bufferFrames) {
+ // just give up if this still doesn't work
+ errorText = "The two devices selected are incompatible";
+ }
+ }
+ }
}
#endif
if (!errorText.empty()) {
std::cerr << "RtAudioInterface failed to open stream: " << errorText << '\n'
<< std::endl;
- mRtAudio.reset();
+ mRtAudioInput.reset();
+ mRtAudioOutput.reset();
throw std::runtime_error(errorText);
}
}
//*******************************************************************************
-long RtAudioInterface::getDefaultDevice(RtAudio& rtaudio, bool isInput)
+long RtAudioInterface::getDefaultDevice(bool isInput)
{
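+    // a transient RtAudio instance is enough to query the default device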
+ RtAudio rtaudio;
+
#if RTAUDIO_VERSION_MAJOR < 6
if (rtaudio.getCurrentApi() == RtAudio::LINUX_PULSE) {
return getDefaultDeviceForLinuxPulseAudio(isInput);
unsigned int nFrames, double /*streamTime*/,
RtAudioStreamStatus /*status*/)
{
- // TODO: this function may need more changes. As-is I'm not sure this will work
-
- sample_t* inputBuffer_sample = NULL;
- sample_t* outputBuffer_sample = NULL;
+ sample_t* inputBuffer_sample = static_cast<sample_t*>(inputBuffer);
+ sample_t* outputBuffer_sample = static_cast<sample_t*>(outputBuffer);
+ int in_chans_num = getNumInputChannels();
- inputBuffer_sample = (sample_t*)inputBuffer;
- outputBuffer_sample = (sample_t*)outputBuffer;
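+    // in duplex mode both buffers must be present; in split mode each
+    // stream's callback delivers only its own buffer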
+ if (mDuplexMode) {
+ if (inputBuffer_sample == NULL || outputBuffer_sample == NULL) {
+ return 0;
+ }
+ } else if (inputBuffer_sample == NULL && outputBuffer_sample == NULL) {
+ return 0;
+ }
- int in_chans_num = getNumInputChannels();
- if (inputBuffer_sample != NULL && outputBuffer_sample != NULL) {
- // Get input and output buffers
- //-------------------------------------------------------------------
+ // process input before output to minimize monitor latency on duplex devices
+ if (inputBuffer_sample != NULL) {
+        // set per-channel pointers into the input buffer
for (int i = 0; i < mInBuffer.size(); i++) {
// Input Ports are READ ONLY
mInBuffer[i] = inputBuffer_sample + (nFrames * i);
}
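+        // mix a stereo input down to mono in place when requested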
+ if (in_chans_num == 2 && mInBuffer.size() == in_chans_num
+ && mInputMixMode == AudioInterface::MIXTOMONO) {
+ mStereoToMonoMixerPtr->compute(nFrames, mInBuffer.data(), mInBuffer.data());
+ }
+ AudioInterface::audioInputCallback(mInBuffer, nFrames);
+ }
+ if (outputBuffer_sample != NULL) {
+        // set per-channel pointers into the output buffer
for (int i = 0; i < mOutBuffer.size(); i++) {
// Output Ports are WRITABLE
mOutBuffer[i] = outputBuffer_sample + (nFrames * i);
}
- if (in_chans_num == 2 && mInBuffer.size() == in_chans_num
- && mInputMixMode == AudioInterface::MIXTOMONO) {
- mStereoToMonoMixerPtr->compute(nFrames, mInBuffer.data(), mInBuffer.data());
- }
- AudioInterface::callback(mInBuffer, mOutBuffer, nFrames);
+ AudioInterface::audioOutputCallback(mOutBuffer, nFrames);
}
return 0;
//*******************************************************************************
int RtAudioInterface::startProcess()
{
- if (mRtAudio.isNull())
+ if (mRtAudioInput.isNull())
+ return 0;
+ if (!mDuplexMode && mRtAudioOutput.isNull())
return 0;
std::string errorText;
#if RTAUDIO_VERSION_MAJOR < 6
try {
- mRtAudio->startStream();
+ mRtAudioInput->startStream();
+ if (!mDuplexMode) {
+ mRtAudioOutput->startStream();
+ }
} catch (RtAudioError& e) {
errorText = e.getMessage();
}
#else
- if (RTAUDIO_NO_ERROR != mRtAudio->startStream()) {
- errorText = mRtAudio->getErrorText();
+ if (RTAUDIO_NO_ERROR != mRtAudioInput->startStream()) {
+ errorText = mRtAudioInput->getErrorText();
+ } else if (!mDuplexMode && RTAUDIO_NO_ERROR != mRtAudioOutput->startStream()) {
+ errorText = mRtAudioOutput->getErrorText();
}
#endif
if (!errorText.empty()) {
std::cerr << "RtAudioInterface failed to start stream: " << errorText
<< std::endl;
- mRtAudio.reset();
+ mRtAudioInput.reset();
+ mRtAudioOutput.reset();
return (-1);
}
//*******************************************************************************
int RtAudioInterface::stopProcess()
{
- if (mRtAudio.isNull())
+ if (mRtAudioInput.isNull())
+ return 0;
+ if (!mDuplexMode && mRtAudioOutput.isNull())
return 0;
std::string errorText;
#if RTAUDIO_VERSION_MAJOR < 6
try {
- mRtAudio->closeStream();
+ mRtAudioInput->closeStream();
// this causes it to crash for some reason
// mRtAudio->abortStream();
+ if (!mDuplexMode) {
+ mRtAudioOutput->closeStream();
+ }
} catch (RtAudioError& e) {
errorText = e.getMessage();
}
#else
- if (RTAUDIO_NO_ERROR != mRtAudio->abortStream()) {
- errorText = mRtAudio->getErrorText();
+ if (RTAUDIO_NO_ERROR != mRtAudioInput->abortStream()) {
+ errorText = mRtAudioInput->getErrorText();
+ } else if (!mDuplexMode && RTAUDIO_NO_ERROR != mRtAudioOutput->abortStream()) {
+ errorText = mRtAudioOutput->getErrorText();
} else {
- mRtAudio->closeStream();
+ mRtAudioInput->closeStream();
+ if (!mDuplexMode) {
+ mRtAudioOutput->closeStream();
+ }
}
#endif
- mRtAudio.reset();
+ mRtAudioInput.reset();
+ mRtAudioOutput.reset();
if (!errorText.empty()) {
std::cerr << errorText << '\n' << std::endl;
RtAudio::Api api;
void print() const;
void printVerbose() const;
+ bool checkSampleRate(unsigned int srate) const;
RtAudioDevice& operator=(const RtAudio::DeviceInfo& info);
};
// updates device and returns true if found
bool getDeviceInfoFromId(const long deviceId, RtAudioDevice& device,
bool isInput) const;
- long getDefaultDevice(RtAudio& rtaudio, bool isInput);
+ long getDefaultDevice(bool isInput);
long getDefaultDeviceForLinuxPulseAudio(bool isInput);
QVarLengthArray<float*>
        mOutBuffer;  ///< Vector of output buffers/channels to write to RtAudio
QVector<RtAudioDevice>
        mDevices;  ///< Vector of audio interfaces available via RtAudio
- QSharedPointer<RtAudio>
- mRtAudio; ///< RtAudio class if the input and output device are the same
+ QSharedPointer<RtAudio> mRtAudioInput; ///< RtAudio class for the input device
+ QSharedPointer<RtAudio> mRtAudioOutput; ///< RtAudio class for the output device
+ ///< (null if using duplex mode)
+ bool mDuplexMode; ///< true if using duplex stream mode (input device == output
+ ///< device)
QScopedPointer<StereoToMono> mStereoToMonoMixerPtr;
};
public:
Settings(bool guiEnabled = false, QObject* parent = nullptr)
: QObject(parent)
-#ifndef NO_GUI
+#ifdef NO_GUI
+ , mGuiEnabled(false)
+#else
, mGuiEnabled(guiEnabled)
#endif
, mAudioTester(new AudioTester)
height: parent.height
color: backgroundColour
+ property bool connected: false
+ property bool showMeters: true
+ property bool showTestAudio: true
+
property int fontBig: 20
property int fontMedium: 13
property int fontSmall: 11
x: 0; y: 0
text: "Output Device"
font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
+ bottomPadding: 10 * virtualstudio.uiScale
color: textColour
}
AppIcon {
id: headphonesIcon
anchors.left: outputLabel.left
- anchors.verticalCenter: outputDeviceMeters.verticalCenter
+ anchors.top: outputLabel.bottom
width: 28 * virtualstudio.uiScale
height: 28 * virtualstudio.uiScale
icon.source: "headphones.svg"
audio.inputDevice = modelData.text
}
}
- audio.validateDevices()
- audio.restartAudio()
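+                        // while connected to a studio, a device change requires a
+                        // reconnect; otherwise just revalidate and restart local audio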
+ if (connected) {
+ virtualstudio.triggerReconnect(false);
+ } else {
+ audio.validateDevices()
+ audio.restartAudio()
+ }
}
}
}
anchors.top: outputCombo.bottom
anchors.topMargin: 16 * virtualstudio.uiScale
height: 24 * virtualstudio.uiScale
- model: audio.outputMeterLevels
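+                    // fall back to a silent two-channel model while meters are hidden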
+ model: showMeters ? audio.outputMeterLevels : [0, 0]
clipped: audio.outputClipped
+ visible: showMeters
enabled: audio.audioReady && !Boolean(audio.devicesError)
}
tooltipText: "How loudly you hear other participants"
showLabel: false
sliderEnabled: true
+ visible: showMeters
}
Text {
id: outputChannelsLabel
anchors.left: outputCombo.left
anchors.right: outputCombo.horizontalCenter
- anchors.top: outputSlider.bottom
+ anchors.top: showMeters ? outputSlider.bottom : outputCombo.bottom
anchors.topMargin: 12 * virtualstudio.uiScale
textFormat: Text.RichText
text: "Output Channel(s)"
outputChannelsCombo.popup.close()
audio.baseOutputChannel = modelData.baseChannel
audio.numOutputChannels = modelData.numChannels
- audio.validateDevices()
- audio.restartAudio()
+ if (connected) {
+ virtualstudio.triggerReconnect(false);
+ } else {
+ audio.validateDevices()
+ audio.restartAudio()
+ }
}
}
}
Button {
id: testOutputAudioButton
+ visible: showTestAudio
background: Rectangle {
radius: 6 * virtualstudio.uiScale
color: testOutputAudioButton.down ? buttonPressedColour : (testOutputAudioButton.hovered ? buttonHoverColour : buttonColour)
Rectangle {
id: divider1
- anchors.top: testOutputAudioButton.bottom
+ anchors.top: showTestAudio ? testOutputAudioButton.bottom : outputChannelsCombo.bottom
anchors.topMargin: 24 * virtualstudio.uiScale
width: parent.width - x - (16 * virtualstudio.uiScale); height: 2 * virtualstudio.uiScale
color: "#E0E0E0"
id: inputLabel
anchors.left: outputLabel.left
anchors.top: divider1.bottom
- anchors.topMargin: 32 * virtualstudio.uiScale
+ anchors.topMargin: 24 * virtualstudio.uiScale
text: "Input Device"
font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
+ bottomPadding: 10 * virtualstudio.uiScale
color: textColour
}
AppIcon {
id: microphoneIcon
- anchors.left: outputLabel.left
- anchors.verticalCenter: inputDeviceMeters.verticalCenter
+ anchors.left: inputLabel.left
+ anchors.top: inputLabel.bottom
width: 32 * virtualstudio.uiScale
height: 32 * virtualstudio.uiScale
icon.source: "mic.svg"
audio.outputDevice = modelData.text
}
}
- audio.validateDevices()
- audio.restartAudio()
+ if (connected) {
+ virtualstudio.triggerReconnect(false);
+ } else {
+ audio.validateDevices()
+ audio.restartAudio()
+ }
}
}
}
anchors.top: inputCombo.bottom
anchors.topMargin: 16 * virtualstudio.uiScale
height: 24 * virtualstudio.uiScale
- model: audio.inputMeterLevels
+ model: showMeters ? audio.inputMeterLevels : [0, 0]
clipped: audio.inputClipped
+ visible: showMeters
enabled: audio.audioReady && !Boolean(audio.devicesError)
}
VolumeSlider {
id: inputSlider
- anchors.left: inputDeviceMeters.left
+ anchors.left: inputCombo.left
anchors.right: parent.right
anchors.rightMargin: rightMargin * virtualstudio.uiScale
anchors.top: inputDeviceMeters.bottom
tooltipText: "How loudly other participants hear you"
showLabel: false
sliderEnabled: true
+ visible: showMeters
}
Button {
id: inputChannelsLabel
anchors.left: inputCombo.left
anchors.right: inputCombo.horizontalCenter
- anchors.top: inputSlider.bottom
+ anchors.top: showMeters ? inputSlider.bottom : inputCombo.bottom
anchors.topMargin: 12 * virtualstudio.uiScale
textFormat: Text.RichText
text: "Input Channel(s)"
inputChannelsCombo.popup.close()
audio.baseInputChannel = modelData.baseChannel
audio.numInputChannels = modelData.numChannels
- audio.validateDevices()
- audio.restartAudio()
+ if (connected) {
+ virtualstudio.triggerReconnect(false);
+ } else {
+ audio.validateDevices()
+ audio.restartAudio()
+ }
}
}
}
anchors.left: inputCombo.horizontalCenter
anchors.right: inputCombo.right
anchors.rightMargin: 8 * virtualstudio.uiScale
- anchors.top: inputSlider.bottom
+ anchors.top: showMeters ? inputSlider.bottom : inputCombo.bottom
anchors.topMargin: 12 * virtualstudio.uiScale
textFormat: Text.RichText
text: "Mono / Stereo"
inputMixModeCombo.currentIndex = index
inputMixModeCombo.popup.close()
audio.inputMixMode = audio.inputMixModeComboModel[index].value
- audio.validateDevices();
- audio.restartAudio()
+ if (connected) {
+ virtualstudio.triggerReconnect(false);
+ } else {
+ audio.validateDevices()
+ audio.restartAudio()
+ }
}
}
}
text: "Output Volume"
font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
wrapMode: Text.WordWrap
+ bottomPadding: 10 * virtualstudio.uiScale
color: textColour
}
AppIcon {
id: jackHeadphonesIcon
anchors.left: jackOutputLabel.left
- anchors.verticalCenter: jackOutputVolumeSlider.verticalCenter
+ anchors.top: jackOutputLabel.bottom
width: 28 * virtualstudio.uiScale
height: 28 * virtualstudio.uiScale
icon.source: "headphones.svg"
anchors.rightMargin: rightMargin * virtualstudio.uiScale
anchors.verticalCenter: jackOutputLabel.verticalCenter
height: 24 * virtualstudio.uiScale
- model: audio.outputMeterLevels
+ model: showMeters ? audio.outputMeterLevels : [0, 0]
clipped: audio.outputClipped
enabled: audio.audioReady && !Boolean(audio.devicesError)
}
text: "Input Volume"
font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
wrapMode: Text.WordWrap
+ bottomPadding: 10 * virtualstudio.uiScale
color: textColour
}
AppIcon {
id: jackMicrophoneIcon
anchors.left: jackInputLabel.left
- anchors.verticalCenter: jackInputVolumeSlider.verticalCenter
+ anchors.top: jackInputLabel.bottom
width: 32 * virtualstudio.uiScale
height: 32 * virtualstudio.uiScale
icon.source: "mic.svg"
anchors.rightMargin: rightMargin * virtualstudio.uiScale
anchors.verticalCenter: jackInputLabel.verticalCenter
height: 24 * virtualstudio.uiScale
- model: audio.inputMeterLevels
+ model: showMeters ? audio.inputMeterLevels : [0, 0]
clipped: audio.inputClipped
enabled: audio.audioReady && !Boolean(audio.devicesError)
}
property string linkText: virtualstudio.darkMode ? "#8B8D8D" : "#272525"
- function getCurrentInputDeviceIndex () {
- if (audio.inputDevice === "") {
- return audio.inputComboModel.findIndex(elem => elem.type === "element");
- }
-
- let idx = audio.inputComboModel.findIndex(elem => elem.type === "element" && elem.text === audio.inputDevice);
- if (idx < 0) {
- idx = audio.inputComboModel.findIndex(elem => elem.type === "element");
- }
-
- return idx;
- }
-
- function getCurrentOutputDeviceIndex() {
- if (audio.outputDevice === "") {
- return audio.outputComboModel.findIndex(elem => elem.type === "element");
- }
-
- let idx = audio.outputComboModel.findIndex(elem => elem.type === "element" && elem.text === audio.outputDevice);
- if (idx < 0) {
- idx = audio.outputComboModel.findIndex(elem => elem.type === "element");
- }
-
- return idx;
- }
-
MouseArea {
anchors.fill: parent
propagateComposedEvents: false
}
Rectangle {
- width: parent.width; height: 360
- anchors.verticalCenter: parent.verticalCenter
+ id: audioSettingsView
+ width: parent.width;
+ height: parent.height;
color: backgroundColour
radius: 6 * virtualstudio.uiScale
- Item {
- id: usingRtAudio
- anchors.top: parent.top
- anchors.topMargin: 24 * virtualstudio.uiScale
- anchors.bottom: parent.bottom
- anchors.left: parent.left
- anchors.leftMargin: 24 * virtualstudio.uiScale
- anchors.right: parent.right
-
- Rectangle {
- id: leftSpacer
- x: 0; y: 0
- width: 144 * virtualstudio.uiScale
- height: 0
- color: "transparent"
- }
-
- DeviceRefreshButton {
- id: refreshButton
- y: 0;
- x: parent.width - (144 + rightMargin) * virtualstudio.uiScale;
- enabled: !audio.scanningDevices
- onDeviceRefresh: function () {
- virtualstudio.triggerReconnect(true);
- }
- }
-
- Text {
- text: "Scanning Devices"
- y: 0;
- anchors.right: refreshButton.left;
- anchors.rightMargin: 16 * virtualstudio.uiScale;
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- visible: audio.scanningDevices
- }
-
- Text {
- id: outputLabel
- x: 0;
- anchors.top: refreshButton.bottom
- anchors.topMargin: 24 * virtualstudio.uiScale
- text: "Output Device"
- font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
-
- InfoTooltip {
- id: outputHelpIcon
- anchors.left: outputLabel.right
- anchors.bottom: outputLabel.top
- anchors.bottomMargin: -8 * virtualstudio.uiScale
- size: 16 * virtualstudio.uiScale
- content: qsTr("How you'll hear the studio audio")
- }
-
- AppIcon {
- id: headphonesIcon
- anchors.left: outputLabel.left
- anchors.top: outputLabel.bottom
- anchors.topMargin: bottomToolTipMargin * virtualstudio.uiScale
- width: 28 * virtualstudio.uiScale
- height: 28 * virtualstudio.uiScale
- icon.source: "headphones.svg"
- }
-
- ComboBox {
- id: outputCombo
- anchors.left: leftSpacer.right
- anchors.verticalCenter: outputLabel.verticalCenter
- anchors.rightMargin: rightMargin * virtualstudio.uiScale
- width: parent.width - leftSpacer.width - rightMargin * virtualstudio.uiScale
- enabled: virtualstudio.connectionState == "Connected"
- model: audio.outputComboModel
- currentIndex: getCurrentOutputDeviceIndex()
- delegate: ItemDelegate {
- required property var modelData
- required property int index
-
- leftPadding: 0
-
- width: parent.width
- contentItem: Text {
- leftPadding: modelData.type === "element" && outputCombo.model.filter(it => it.type === "header").length > 0 ? 24 : 12
- text: modelData.text || ""
- font.bold: modelData.type === "header"
- }
- highlighted: outputCombo.highlightedIndex === index
- MouseArea {
- anchors.fill: parent
- onClicked: {
- if (modelData.type == "element") {
- outputCombo.currentIndex = index
- outputCombo.popup.close()
- audio.outputDevice = modelData.text
- if (modelData.category.startsWith("Low-Latency")) {
- let inputComboIdx = inputCombo.model.findIndex(it => it.category.startsWith("Low-Latency") && it.text === modelData.text);
- if (inputComboIdx !== null && inputComboIdx !== undefined) {
- inputCombo.currentIndex = inputComboIdx;
- audio.inputDevice = modelData.text
- }
- }
- virtualstudio.triggerReconnect(false);
- }
- }
- }
- }
- contentItem: Text {
- leftPadding: 12
- font: outputCombo.font
- horizontalAlignment: Text.AlignHLeft
- verticalAlignment: Text.AlignVCenter
- elide: Text.ElideRight
- text: outputCombo.model[outputCombo.currentIndex] && outputCombo.model[outputCombo.currentIndex].text ? outputCombo.model[outputCombo.currentIndex].text : ""
- }
- }
-
- Text {
- id: outputChannelsLabel
- anchors.left: outputCombo.left
- anchors.right: outputCombo.horizontalCenter
- anchors.top: outputCombo.bottom
- anchors.topMargin: 12 * virtualstudio.uiScale
- textFormat: Text.RichText
- text: "Output Channel(s)"
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
-
- ComboBox {
- id: outputChannelsCombo
- anchors.left: outputCombo.left
- anchors.right: outputCombo.horizontalCenter
- anchors.rightMargin: 8 * virtualstudio.uiScale
- anchors.top: outputChannelsLabel.bottom
- anchors.topMargin: 4 * virtualstudio.uiScale
- enabled: audio.outputChannelsComboModel.length > 1 && virtualstudio.connectionState == "Connected"
- model: audio.outputChannelsComboModel
- currentIndex: (() => {
- let idx = audio.outputChannelsComboModel.findIndex(elem => elem.baseChannel === audio.baseOutputChannel
- && elem.numChannels === audio.numOutputChannels);
- if (idx < 0) {
- idx = 0;
- }
- return idx;
- })()
- delegate: ItemDelegate {
- required property var modelData
- required property int index
- width: parent.width
- contentItem: Text {
- text: modelData.label
- }
- highlighted: outputChannelsCombo.highlightedIndex === index
- MouseArea {
- anchors.fill: parent
- onClicked: {
- outputChannelsCombo.currentIndex = index
- outputChannelsCombo.popup.close()
- audio.baseOutputChannel = modelData.baseChannel
- audio.numOutputChannels = modelData.numChannels
- virtualstudio.triggerReconnect(false);
- }
- }
- }
- contentItem: Text {
- leftPadding: 12
- font: inputCombo.font
- horizontalAlignment: Text.AlignHLeft
- verticalAlignment: Text.AlignVCenter
- elide: Text.ElideRight
- text: outputChannelsCombo.model[outputChannelsCombo.currentIndex].label || ""
- }
- }
-
- Text {
- id: inputLabel
- anchors.left: outputLabel.left
- anchors.top: outputChannelsCombo.bottom
- anchors.topMargin: 32 * virtualstudio.uiScale
- text: "Input Device"
- font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
-
- InfoTooltip {
- id: inputHelpIcon
- anchors.left: inputLabel.right
- anchors.bottom: inputLabel.top
- anchors.bottomMargin: -8 * virtualstudio.uiScale
- size: 16 * virtualstudio.uiScale
- content: qsTr("Audio sent to the studio (microphone, instrument, mixer, etc.)")
- }
-
- AppIcon {
- id: microphoneIcon
- anchors.left: inputLabel.left
- anchors.top: inputLabel.bottom
- anchors.topMargin: bottomToolTipMargin * virtualstudio.uiScale
- width: 32 * virtualstudio.uiScale
- height: 32 * virtualstudio.uiScale
- icon.source: "mic.svg"
- }
-
- ComboBox {
- id: inputCombo
- model: audio.inputComboModel
- currentIndex: getCurrentInputDeviceIndex()
- anchors.left: outputCombo.left
- anchors.right: outputCombo.right
- anchors.verticalCenter: inputLabel.verticalCenter
- enabled: virtualstudio.connectionState == "Connected"
- delegate: ItemDelegate {
- required property var modelData
- required property int index
-
- leftPadding: 0
-
- width: parent.width
- contentItem: Text {
- leftPadding: modelData.type === "element" && inputCombo.model.filter(it => it.type === "header").length > 0 ? 24 : 12
- text: modelData.text || ""
- font.bold: modelData.type === "header"
- }
- highlighted: inputCombo.highlightedIndex === index
- MouseArea {
- anchors.fill: parent
- onClicked: {
- if (modelData.type == "element") {
- inputCombo.currentIndex = index
- inputCombo.popup.close()
- audio.inputDevice = modelData.text
- if (modelData.category.startsWith("Low-Latency")) {
- let outputComboIdx = outputCombo.model.findIndex(it => it.category.startsWith("Low-Latency") && it.text === modelData.text);
- if (outputComboIdx !== null && outputComboIdx !== undefined) {
- outputCombo.currentIndex = outputComboIdx;
- audio.outputDevice = modelData.text
- }
- }
- virtualstudio.triggerReconnect(false);
- }
- }
- }
- }
- contentItem: Text {
- leftPadding: 12
- font: inputCombo.font
- horizontalAlignment: Text.AlignHLeft
- verticalAlignment: Text.AlignVCenter
- elide: Text.ElideRight
- text: inputCombo.model[inputCombo.currentIndex] && inputCombo.model[inputCombo.currentIndex].text ? inputCombo.model[inputCombo.currentIndex].text : ""
- }
- }
-
- Text {
- id: inputChannelsLabel
- anchors.left: inputCombo.left
- anchors.right: inputCombo.horizontalCenter
- anchors.top: inputCombo.bottom
- anchors.topMargin: 12 * virtualstudio.uiScale
- textFormat: Text.RichText
- text: "Input Channel(s)"
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
-
- ComboBox {
- id: inputChannelsCombo
- anchors.left: inputCombo.left
- anchors.right: inputCombo.horizontalCenter
- anchors.rightMargin: 8 * virtualstudio.uiScale
- anchors.top: inputChannelsLabel.bottom
- anchors.topMargin: 4 * virtualstudio.uiScale
- enabled: audio.inputChannelsComboModel.length > 1 && virtualstudio.connectionState == "Connected"
- model: audio.inputChannelsComboModel
- currentIndex: (() => {
- let idx = audio.inputChannelsComboModel.findIndex(elem => elem.baseChannel === audio.baseInputChannel
- && elem.numChannels === audio.numInputChannels);
- if (idx < 0) {
- idx = 0;
- }
- return idx;
- })()
- delegate: ItemDelegate {
- required property var modelData
- required property int index
- width: parent.width
- contentItem: Text {
- text: modelData.label
- }
- highlighted: inputChannelsCombo.highlightedIndex === index
- MouseArea {
- anchors.fill: parent
- onClicked: {
- inputChannelsCombo.currentIndex = index
- inputChannelsCombo.popup.close()
- audio.baseInputChannel = modelData.baseChannel
- audio.numInputChannels = modelData.numChannels
- virtualstudio.triggerReconnect(false);
- }
- }
- }
- contentItem: Text {
- leftPadding: 12
- font: inputCombo.font
- horizontalAlignment: Text.AlignHLeft
- verticalAlignment: Text.AlignVCenter
- elide: Text.ElideRight
- text: inputChannelsCombo.model[inputChannelsCombo.currentIndex].label || ""
- }
- }
-
- Text {
- id: inputMixModeLabel
- anchors.left: inputCombo.horizontalCenter
- anchors.right: inputCombo.right
- anchors.rightMargin: 8 * virtualstudio.uiScale
- anchors.top: inputCombo.bottom
- anchors.topMargin: 12 * virtualstudio.uiScale
- textFormat: Text.RichText
- text: "Mono / Stereo"
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
-
- ComboBox {
- id: inputMixModeCombo
- anchors.left: inputCombo.horizontalCenter
- anchors.right: inputCombo.right
- anchors.rightMargin: 8 * virtualstudio.uiScale
- anchors.top: inputMixModeLabel.bottom
- anchors.topMargin: 4 * virtualstudio.uiScale
- enabled: audio.inputMixModeComboModel.length > 1 && virtualstudio.connectionState == "Connected"
- model: audio.inputMixModeComboModel
- currentIndex: (() => {
- let idx = audio.inputMixModeComboModel.findIndex(elem => elem.value === audio.inputMixMode);
- if (idx < 0) {
- idx = 0;
- }
- return idx;
- })()
- delegate: ItemDelegate {
- required property var modelData
- required property int index
- width: parent.width
- contentItem: Text {
- text: modelData.label
- }
- highlighted: inputMixModeCombo.highlightedIndex === index
- MouseArea {
- anchors.fill: parent
- onClicked: {
- inputMixModeCombo.currentIndex = index
- inputMixModeCombo.popup.close()
- audio.inputMixMode = audio.inputMixModeComboModel[index].value
- virtualstudio.triggerReconnect(false);
- }
- }
- }
- contentItem: Text {
- leftPadding: 12
- font: inputCombo.font
- horizontalAlignment: Text.AlignHLeft
- verticalAlignment: Text.AlignVCenter
- elide: Text.ElideRight
- text: inputMixModeCombo.model[inputMixModeCombo.currentIndex].label || ""
- }
- }
-
- Text {
- id: inputChannelHelpMessage
- anchors.left: inputChannelsCombo.left
- anchors.leftMargin: 2 * virtualstudio.uiScale
- anchors.right: inputChannelsCombo.right
- anchors.top: inputChannelsCombo.bottom
- anchors.topMargin: 8 * virtualstudio.uiScale
- textFormat: Text.RichText
- wrapMode: Text.WordWrap
- text: audio.inputChannelsComboModel.length > 1 ? "Choose up to 2 channels" : "Only 1 channel available"
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
+ DeviceRefreshButton {
+ id: refreshButton
+ anchors.top: parent.top;
+ anchors.topMargin: 16 * virtualstudio.uiScale;
+ anchors.right: parent.right;
+ anchors.rightMargin: 16 * virtualstudio.uiScale;
+ enabled: !audio.scanningDevices
+ onDeviceRefresh: function () {
+ virtualstudio.triggerReconnect(true);
}
+ }
- Text {
- id: inputMixModeHelpMessage
- anchors.left: inputMixModeCombo.left
- anchors.leftMargin: 2 * virtualstudio.uiScale
- anchors.right: inputMixModeCombo.right
- anchors.top: inputMixModeCombo.bottom
- anchors.topMargin: 8 * virtualstudio.uiScale
- textFormat: Text.RichText
- wrapMode: Text.WordWrap
- text: (() => {
- if (audio.inputMixMode === 2) {
- return "Treat the channels as Left and Right signals, coming through each speaker separately.";
- } else if (audio.inputMixMode === 3) {
- return "Combine the channels into one central channel coming through both speakers.";
- } else if (audio.inputMixMode === 1) {
- return "Send a single channel of audio";
- } else {
- return "";
- }
- })()
- font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
- color: textColour
- }
+ Text {
+ text: "Restarting Audio"
+ anchors.verticalCenter: refreshButton.verticalCenter
+ anchors.right: refreshButton.left;
+ anchors.rightMargin: 16 * virtualstudio.uiScale;
+ font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
+ color: textColour
+ visible: audio.scanningDevices
+ }
- DeviceWarning {
- id: deviceWarning
- anchors.left: inputCombo.left
- anchors.top: inputMixModeHelpMessage.bottom
- anchors.topMargin: 48 * virtualstudio.uiScale
- visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
- }
+ AudioSettings {
+ id: audioSettings
+ showMeters: false
+ showTestAudio: false
+ connected: true
+ height: 300 * virtualstudio.uiScale
+ anchors.top: refreshButton.bottom;
+ anchors.topMargin: 16 * virtualstudio.uiScale;
}
}
color: textColour
}
}
+
+ DeviceWarning {
+ id: deviceWarning
+ anchors.left: backButton.right
+ anchors.leftMargin: 24 * virtualstudio.uiScale
+ anchors.bottom: parent.bottom
+ anchors.bottomMargin: 16 * virtualstudio.uiScale;
+ visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+ }
}
DeviceControlsGroup {
id: deviceControlsGroup
- showMinified: false
anchors.bottom: footer.top
}
id: footer
anchors.bottom: parent.bottom
}
-
- Connections {
- target: virtualstudio
-
- // self-managed servers do not support minified controls so keep it full size
- function onCollapseDeviceControlsChanged(collapseDeviceControls) {
- deviceControlsGroup.showMinified = virtualstudio.currentStudio.isManaged && collapseDeviceControls;
- }
- }
}
anchors.left: parent.left
anchors.leftMargin: 8 * virtualstudio.uiScale
- anchors.verticalCenter: parent.verticalCenter
background: Rectangle {
color: isInput ? (audio.inputMuted ? muteButtonMutedColor : buttonColour) : "transparent"
ColumnLayout {
anchors.fill: parent
- spacing: 2
+ spacing: 2 * virtualstudio.uiScale
VolumeSlider {
Layout.fillWidth: true
ColumnLayout {
anchors.fill: parent
- spacing: 2
+ spacing: 4 * virtualstudio.uiScale
VolumeSlider {
Layout.fillWidth: true
ColumnLayout {
anchors.fill: parent
- spacing: 2
+ spacing: 5 * virtualstudio.uiScale
Item {
- Layout.preferredHeight: minifiedHeight
+ Layout.topMargin: 5 * virtualstudio.uiScale
+ Layout.preferredHeight: 30 * virtualstudio.uiScale
Layout.fillWidth: true
RowLayout {
anchors.fill: parent
- spacing: 8
+ spacing: 8 * virtualstudio.uiScale
Item {
Layout.fillHeight: true
Loader {
id: typeIconIndicator
anchors.left: parent.left
- anchors.verticalCenter: parent.verticalCenter
sourceComponent: controlIndicator
}
id: label
anchors.left: parent.left
anchors.leftMargin: 36 * virtualstudio.uiScale
- anchors.verticalCenter: parent.verticalCenter
text: isInput ? "Input" : "Output"
font { family: "Poppins"; weight: Font.Bold; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
Item {
Layout.fillHeight: true
Layout.fillWidth: true
- Layout.preferredWidth: 200
+ Layout.preferredWidth: 200 * virtualstudio.uiScale
Meter {
anchors.fill: parent
- anchors.topMargin: 5
- anchors.rightMargin: 8
+ anchors.rightMargin: 8 * virtualstudio.uiScale
model: isInput ? audio.inputMeterLevels : audio.outputMeterLevels
clipped: isInput ? audio.inputClipped : audio.outputClipped
enabled: true
}
Item {
- Layout.preferredHeight: 42
- Layout.minimumHeight: 42
- Layout.maximumHeight: 42
Layout.fillWidth: true
+ Layout.fillHeight: true
+ Layout.bottomMargin: 5 * virtualstudio.uiScale
RowLayout {
anchors.fill: parent
- spacing: 2
+ spacing: 8 * virtualstudio.uiScale
Item {
Layout.fillHeight: true
Layout.fillWidth: true
- Layout.alignment: Qt.AlignVCenter
Layout.leftMargin: 8 * virtualstudio.uiScale
Layout.rightMargin: 8 * virtualstudio.uiScale
Loader {
anchors.fill: parent
- anchors.verticalCenter: parent.verticalCenter
+ anchors.top: parent.top
sourceComponent: isInput ? inputControls : outputControls
}
}
import QtQuick.Layouts
Rectangle {
- required property bool showMinified
-
property string disabledButtonText: "#D3D4D4"
property string saveButtonText: "#DB0A0A"
- property int minifiedHeight: 36
- property int fullHeight: 84
+ property int fullHeight: 88 * virtualstudio.uiScale
+ property int minimumHeight: 48 * virtualstudio.uiScale
+
+ property bool isUsingRtAudio: audio.audioBackend == "RtAudio"
+ property bool isReady: virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+ property bool showDeviceControls: getShowDeviceControls()
id: deviceControlsGroup
width: parent.width
- height: showMinified ? minifiedHeight : fullHeight
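+    // full height with controls; minimal height while not ready or when
+    // only the feedback modal is visible; collapsed entirely otherwise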
+ height: isReady ? (showDeviceControls ? fullHeight : (feedbackDetectedModal.visible ? minimumHeight : 0)) : minimumHeight;
color: backgroundColour
- property bool showDeviceControls: studioStatus === "Ready"
+ function getShowDeviceControls () {
+        // self-managed servers do not support minified controls, so keep it at full size
+ return !virtualstudio.currentStudio.isManaged || (!virtualstudio.collapseDeviceControls && isReady);
+ }
MouseArea {
anchors.fill: parent
Item {
Layout.fillHeight: true
Layout.fillWidth: true
- visible: !showDeviceControls
+ visible: !isReady
Button {
id: backButton
Item {
Layout.fillHeight: true
- Layout.preferredWidth: 48
+ Layout.preferredWidth: 48 * virtualstudio.uiScale
visible: showDeviceControls
ColumnLayout {
spacing: 2
Item {
- Layout.preferredHeight: 20
- Layout.preferredWidth: 40
- Layout.alignment: Qt.AlignHCenter
+ Layout.preferredHeight: 24 * virtualstudio.uiScale
+ Layout.preferredWidth: 24 * virtualstudio.uiScale
+ Layout.topMargin: 2 * virtualstudio.uiScale
+ Layout.rightMargin: 2 * virtualstudio.uiScale
+ Layout.alignment: Qt.AlignRight | Qt.AlignTop
+
+ Button {
+ id: closeDeviceControlsButton
+ visible: virtualstudio.currentStudio.isManaged
+ width: 24 * virtualstudio.uiScale
+ height: 24 * virtualstudio.uiScale
+ background: Rectangle {
+ color: backgroundColour
+ }
+ anchors.top: parent.top
+ anchors.right: parent.right
+ onClicked: {
+ virtualstudio.collapseDeviceControls = true;
+ }
+
+ AppIcon {
+ id: closeDeviceControlsIcon
+ anchors { verticalCenter: parent.verticalCenter; horizontalCenter: parent.horizontalCenter }
+ width: 24 * virtualstudio.uiScale
+ height: 24 * virtualstudio.uiScale
+ color: closeDeviceControlsButton.hovered ? textColour : browserButtonHoverColour
+ icon.source: "close.svg"
+ onClicked: {
+ virtualstudio.collapseDeviceControls = true;
+ }
+ }
+ }
}
Item {
- Layout.preferredHeight: 48
- Layout.preferredWidth: 40
- Layout.alignment: Qt.AlignHCenter
- visible: !showMinified
+ Layout.preferredWidth: 40 * virtualstudio.uiScale
+ Layout.preferredHeight: 64 * virtualstudio.uiScale
+ Layout.bottomMargin: 5 * virtualstudio.uiScale
+ Layout.topMargin: 2 * virtualstudio.uiScale
+ Layout.rightMargin: 2 * virtualstudio.uiScale
+ Layout.alignment: Qt.AlignHCenter | Qt.AlignTop
Button {
id: changeDevicesButton
- width: 36
- height: 36
+ visible: isUsingRtAudio
+ width: 36 * virtualstudio.uiScale
+ height: 36 * virtualstudio.uiScale
anchors.top: parent.top
anchors.horizontalCenter: parent.horizontalCenter
background: Rectangle {
height: 32 * virtualstudio.uiScale
icon.source: "warning.svg"
color: "#F21B1B"
- visible: !showMinified
+ visible: showDeviceControls
}
AppIcon {
width: 24 * virtualstudio.uiScale
icon.source: "warning.svg"
color: "#F21B1B"
- visible: showMinified
+ visible: !showDeviceControls
}
Text {
color: textColour
elide: Text.ElideRight
wrapMode: Text.WordWrap
- visible: !showMinified
+ visible: showDeviceControls
}
Text {
color: textColour
elide: Text.ElideRight
wrapMode: Text.WordWrap
- visible: !showMinified
+ visible: showDeviceControls
}
Text {
color: textColour
elide: Text.ElideRight
wrapMode: Text.WordWrap
- visible: showMinified
+ visible: !showDeviceControls
}
Text {
color: textColour
elide: Text.ElideRight
wrapMode: Text.WordWrap
- visible: !showMinified
+ visible: showDeviceControls
}
Button {
Text {
text: "Ok"
font.family: "Poppins"
- font.pixelSize: showMinified ? fontTiny * virtualstudio.fontScale * virtualstudio.uiScale : fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+ font.pixelSize: showDeviceControls ? fontSmall * virtualstudio.fontScale * virtualstudio.uiScale : fontTiny * virtualstudio.fontScale * virtualstudio.uiScale
font.weight: Font.Bold
color: !Boolean(audio.devicesError) && audio.backendAvailable ? saveButtonText : disabledButtonText
anchors.horizontalCenter: parent.horizontalCenter
anchors.verticalCenter: parent.verticalCenter
}
- visible: !showMinified
+ visible: showDeviceControls
}
Button {
anchors.horizontalCenter: parent.horizontalCenter
anchors.verticalCenter: parent.verticalCenter
}
- visible: showMinified
+ visible: !showDeviceControls
}
}
}
feedbackDetectedModal.visible = true;
}
}
+
+ function onCollapseDeviceControlsChanged(collapseDeviceControls) {
+ showDeviceControls = getShowDeviceControls()
+ }
+
+ function onCurrentStudioChanged(currentStudio) {
+ isReady = virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+ showDeviceControls = getShowDeviceControls()
+ }
+
+ function onConnectionStateChanged(connectionState) {
+ isReady = virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+ showDeviceControls = getShowDeviceControls()
+ }
}
}
\ No newline at end of file
id: devicesWarningTooltip
anchors.left: warningOrErrorText.right
anchors.leftMargin: 2 * virtualstudio.uiScale
- anchors.bottom: warningOrErrorText.bottom
- anchors.bottomMargin: 6 * virtualstudio.uiScale
+ anchors.top: devicesWarningIcon.top
content: qsTr(audio.devicesError || audio.devicesWarning)
iconColor: devicesWarningColour
size: 16 * virtualstudio.uiScale
anchors.topMargin: 24 * virtualstudio.uiScale
Button {
- id: noUserFeedbackButton
- anchors.left: buttonsArea.left
- anchors.verticalCenter: parent.buttonsArea
- width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
- onClicked: () => {
- userFeedbackModal.close();
- rating = 0;
- serverId = "";
- messageBox.clear();
- }
-
- background: Rectangle {
- radius: 6 * virtualstudio.uiScale
- color: noUserFeedbackButton.down ? buttonPressedColour : (noUserFeedbackButton.hovered ? buttonHoverColour : buttonColour)
- border.width: 1
- border.color: noUserFeedbackButton.down ? buttonPressedStroke : (noUserFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
- }
-
- Text {
- text: "No thanks"
- font.family: "Poppins"
- font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
- anchors.horizontalCenter: parent.horizontalCenter
- anchors.verticalCenter: parent.verticalCenter
- }
- }
-
- Button {
- id: submitUserFeedbackButton
+ id: userFeedbackButton
-                anchors.right: buttonsArea.right
+ anchors.horizontalCenter: buttonsArea.horizontalCenter
anchors.verticalCenter: parent.buttonsArea
width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
onClicked: () => {
+ if (rating === 0 && messageBox.text === "") {
+ userFeedbackModal.close();
+ serverId = "";
+ messageBox.clear();
+ return;
+ }
virtualstudio.collectFeedbackSurvey(serverId, rating, messageBox.text);
submitted = true;
rating = 0;
background: Rectangle {
radius: 6 * virtualstudio.uiScale
- color: submitUserFeedbackButton.down ? buttonPressedColour : (submitUserFeedbackButton.hovered ? buttonHoverColour : buttonColour)
+ color: userFeedbackButton.down ? buttonPressedColour : (userFeedbackButton.hovered ? buttonHoverColour : buttonColour)
border.width: 1
- border.color: submitUserFeedbackButton.down ? buttonPressedStroke : (submitUserFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
+ border.color: userFeedbackButton.down ? buttonPressedStroke : (userFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
}
Text {
- text: "Submit"
+ text: (rating === 0 && messageBox.text === "") ? "Dismiss" : "Submit"
font.family: "Poppins"
font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
font.weight: Font.Bold
AudioSettings {
id: audioSettings
}
-
- DeviceWarning {
- id: deviceWarning
- anchors.left: parent.left
- anchors.leftMargin: 168 * virtualstudio.uiScale
- anchors.bottom: parent.bottom
- anchors.bottomMargin: 48 * virtualstudio.uiScale
- visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
- }
}
ToolBar {
width: parent.width
horizontalAlignment: Text.AlignHCenter
verticalAlignment: Text.AlignVCenter
+ bottomPadding: 5 * virtualstudio.uiScale
}
}
}
Slider {
id: scaleSlider
- x: 234 * virtualstudio.uiScale; y: 100 * virtualstudio.uiScale
+ x: 220 * virtualstudio.uiScale;
+ y: 100 * virtualstudio.uiScale
width: backendCombo.width
from: 1; to: 1.25; value: virtualstudio.uiScale
onMoved: { virtualstudio.uiScale = value }
// switch mode
virtualstudio.toStandard();
}
- x: 234 * virtualstudio.uiScale; y: 100 * virtualstudio.uiScale
- width: 216 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+ x: 220 * virtualstudio.uiScale;
+ y: 100 * virtualstudio.uiScale
+ width: 216 * virtualstudio.uiScale;
+ height: 30 * virtualstudio.uiScale
Text {
text: virtualstudio.psiBuild ? "Switch to Standard Mode" : "Switch to Classic Mode"
font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
ComboBox {
id: updateChannelCombo
- x: 234 * virtualstudio.uiScale; y: modeButton.y + (48 * virtualstudio.uiScale)
+ x: 220 * virtualstudio.uiScale; y: modeButton.y + (48 * virtualstudio.uiScale)
width: parent.width - x - (16 * virtualstudio.uiScale); height: 36 * virtualstudio.uiScale
model: virtualstudio.updateChannelComboModel
currentIndex: virtualstudio.updateChannel == "stable" ? 0 : 1
onActivated: { virtualstudio.updateChannel = currentIndex == 0 ? "stable": "edge" }
font.family: "Poppins"
- visible: !virtualstudio.noUpdater
+ enabled: !virtualstudio.noUpdater
}
Text {
text: "Update Channel"
font { family: "Poppins"; pixelSize: fontMedium * virtualstudio.fontScale * virtualstudio.uiScale }
color: textColour
- visible: !virtualstudio.noUpdater
}
ComboBox {
audio.audioBackend = currentText
audio.restartAudio();
}
- x: 234 * virtualstudio.uiScale; y: updateChannelCombo.y + (48 * virtualstudio.uiScale)
+ x: 220 * virtualstudio.uiScale; y: updateChannelCombo.y + (48 * virtualstudio.uiScale)
width: updateChannelCombo.width; height: updateChannelCombo.height
}
ComboBox {
id: bufferCombo
- x: 234 * virtualstudio.uiScale; y: backendCombo.y + (48 * virtualstudio.uiScale)
+ x: 220 * virtualstudio.uiScale; y: backendCombo.y + (48 * virtualstudio.uiScale)
width: backendCombo.width; height: updateChannelCombo.height
model: audio.bufferSizeComboModel
currentIndex: getCurrentBufferSizeIndex()
color: Boolean(audio.devicesError) ? disabledButtonTextColour : textColour
}
}
+
+ DeviceWarning {
+ id: deviceWarning
+ x: (0.2 * window.width) + 16 * virtualstudio.uiScale
+ anchors.verticalCenter: parent.verticalCenter
+ visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+ }
}
Connections {
property int leftMargin: 48
property int rightMargin: 16
+ property string strokeColor: virtualstudio.darkMode ? "#80827D7D" : "#34979797"
property string textColour: virtualstudio.darkMode ? "#FAFBFB" : "#0F0D0D"
property string buttonColour: virtualstudio.darkMode ? "#494646" : "#EAECEC"
property string buttonHoverColour: virtualstudio.darkMode ? "#5B5858" : "#D3D4D4"
Text {
id: pageTitle
- x: 16 * virtualstudio.uiScale; y: 32 * virtualstudio.uiScale
+ x: 16 * virtualstudio.uiScale;
+ y: 16 * virtualstudio.uiScale
text: "Choose your audio devices"
font { family: "Poppins"; weight: Font.Bold; pixelSize: fontBig * virtualstudio.fontScale * virtualstudio.uiScale }
color: textColour
id: audioSettings
width: parent.width
anchors.top: pageTitle.bottom
- anchors.topMargin: 24 * virtualstudio.uiScale
+ anchors.topMargin: 16 * virtualstudio.uiScale
}
- Button {
- id: backButton
- background: Rectangle {
- radius: 6 * virtualstudio.uiScale
- color: backButton.down ? buttonPressedColour : buttonColour
- border.width: 1
- border.color: backButton.down || backButton.hovered ? buttonPressedStroke : buttonStroke
- }
- onClicked: { virtualstudio.windowState = "browse"; virtualstudio.studioToJoin = ""; audio.stopAudio(); }
- anchors.left: parent.left
- anchors.leftMargin: 16 * virtualstudio.uiScale
- anchors.bottomMargin: rightMargin * virtualstudio.uiScale
- anchors.bottom: parent.bottom
- width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
- Text {
- text: "Back"
- font.family: "Poppins"
- font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
- color: textColour
- anchors.horizontalCenter: parent.horizontalCenter
- anchors.verticalCenter: parent.verticalCenter
- }
+ Rectangle {
+ id: headerBorder
+ width: parent.width
+ height: 1
+ anchors.top: audioSettings.top
+ color: strokeColor
}
- DeviceWarning {
- id: deviceWarning
- anchors.left: backButton.right
- anchors.leftMargin: 16 * virtualstudio.uiScale
- anchors.verticalCenter: backButton.verticalCenter
- visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+ Rectangle {
+ id: footerBorder
+ width: parent.width
+ height: 1
+ anchors.top: audioSettings.bottom
+ color: strokeColor
}
- Button {
- id: saveButton
- background: Rectangle {
- radius: 6 * virtualstudio.uiScale
- color: saveButton.down ? saveButtonPressedColour : saveButtonBackgroundColour
- border.width: 1
- border.color: saveButton.down || saveButton.hovered ? saveButtonPressedStroke : saveButtonStroke
- }
- enabled: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady
- onClicked: {
- audio.stopAudio(true);
- virtualstudio.windowState = "connected";
- virtualstudio.saveSettings();
- virtualstudio.joinStudio();
+ Rectangle {
+ property int footerHeight: (30 + (rightMargin * 2)) * virtualstudio.uiScale;
+ x: -1; y: parent.height - footerHeight;
+ width: parent.width; height: footerHeight;
+ border.color: "#33979797"
+ color: backgroundColour
+
+ Button {
+ id: backButton
+ background: Rectangle {
+ radius: 6 * virtualstudio.uiScale
+ color: backButton.down ? buttonPressedColour : buttonColour
+ border.width: 1
+ border.color: backButton.down || backButton.hovered ? buttonPressedStroke : buttonStroke
+ }
+ onClicked: { virtualstudio.windowState = "browse"; virtualstudio.studioToJoin = ""; audio.stopAudio(); }
+ anchors.left: parent.left
+ anchors.leftMargin: 16 * virtualstudio.uiScale
+ anchors.bottomMargin: rightMargin * virtualstudio.uiScale
+ anchors.bottom: parent.bottom
+ width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+ Text {
+ text: "Back"
+ font.family: "Poppins"
+ font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+ color: textColour
+ anchors.horizontalCenter: parent.horizontalCenter
+ anchors.verticalCenter: parent.verticalCenter
+ }
}
- anchors.right: parent.right
- anchors.rightMargin: rightMargin * virtualstudio.uiScale
- anchors.bottomMargin: rightMargin * virtualstudio.uiScale
- anchors.bottom: parent.bottom
- width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
- Text {
- text: "Connect to Studio"
- font.family: "Poppins"
- font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
- font.weight: Font.Bold
- color: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady ? saveButtonText : disabledButtonText
- anchors.horizontalCenter: parent.horizontalCenter
- anchors.verticalCenter: parent.verticalCenter
+
+ DeviceWarning {
+ id: deviceWarning
+ anchors.left: backButton.right
+ anchors.leftMargin: 16 * virtualstudio.uiScale
+ anchors.verticalCenter: backButton.verticalCenter
+ visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
}
- }
- CheckBox {
- id: showAgainCheckbox
- checked: virtualstudio.showDeviceSetup
- visible: audio.backendAvailable
- text: qsTr("Ask again next time")
- anchors.right: saveButton.left
- anchors.rightMargin: 16 * virtualstudio.uiScale
- anchors.verticalCenter: saveButton.verticalCenter
- onClicked: { virtualstudio.showDeviceSetup = showAgainCheckbox.checkState == Qt.Checked }
- indicator: Rectangle {
- implicitWidth: 16 * virtualstudio.uiScale
- implicitHeight: 16 * virtualstudio.uiScale
- x: showAgainCheckbox.leftPadding
- y: parent.height / 2 - height / 2
- radius: 3 * virtualstudio.uiScale
- border.color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
-
- Rectangle {
- width: 10 * virtualstudio.uiScale
- height: 10 * virtualstudio.uiScale
- x: 3 * virtualstudio.uiScale
- y: 3 * virtualstudio.uiScale
- radius: 2 * virtualstudio.uiScale
- color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
- visible: showAgainCheckbox.checked
+ Button {
+ id: saveButton
+ background: Rectangle {
+ radius: 6 * virtualstudio.uiScale
+ color: saveButton.down ? saveButtonPressedColour : saveButtonBackgroundColour
+ border.width: 1
+ border.color: saveButton.down || saveButton.hovered ? saveButtonPressedStroke : saveButtonStroke
+ }
+ enabled: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady
+ onClicked: {
+ audio.stopAudio(true);
+ virtualstudio.windowState = "connected";
+ virtualstudio.saveSettings();
+ virtualstudio.joinStudio();
+ }
+ anchors.right: parent.right
+ anchors.rightMargin: rightMargin * virtualstudio.uiScale
+ anchors.bottomMargin: rightMargin * virtualstudio.uiScale
+ anchors.bottom: parent.bottom
+ width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+ Text {
+ text: "Connect to Studio"
+ font.family: "Poppins"
+ font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+ font.weight: Font.Bold
+ color: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady ? saveButtonText : disabledButtonText
+ anchors.horizontalCenter: parent.horizontalCenter
+ anchors.verticalCenter: parent.verticalCenter
}
}
- contentItem: Text {
- text: showAgainCheckbox.text
- font.family: "Poppins"
- font.pixelSize: 10 * virtualstudio.fontScale * virtualstudio.uiScale
- anchors.horizontalCenter: parent.horizontalCenter
- anchors.verticalCenter: parent.verticalCenter
- leftPadding: showAgainCheckbox.indicator.width + showAgainCheckbox.spacing
- color: textColour
+ CheckBox {
+ id: showAgainCheckbox
+ checked: virtualstudio.showDeviceSetup
+ visible: audio.backendAvailable
+ text: qsTr("Ask again next time")
+ anchors.right: saveButton.left
+ anchors.rightMargin: 16 * virtualstudio.uiScale
+ anchors.verticalCenter: saveButton.verticalCenter
+ onClicked: { virtualstudio.showDeviceSetup = showAgainCheckbox.checkState == Qt.Checked }
+ indicator: Rectangle {
+ implicitWidth: 16 * virtualstudio.uiScale
+ implicitHeight: 16 * virtualstudio.uiScale
+ x: showAgainCheckbox.leftPadding
+ y: parent.height / 2 - height / 2
+ radius: 3 * virtualstudio.uiScale
+ border.color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
+
+ Rectangle {
+ width: 10 * virtualstudio.uiScale
+ height: 10 * virtualstudio.uiScale
+ x: 3 * virtualstudio.uiScale
+ y: 3 * virtualstudio.uiScale
+ radius: 2 * virtualstudio.uiScale
+ color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
+ visible: showAgainCheckbox.checked
+ }
+ }
+
+ contentItem: Text {
+ text: showAgainCheckbox.text
+ font.family: "Poppins"
+ font.pixelSize: 10 * virtualstudio.fontScale * virtualstudio.uiScale
+ anchors.horizontalCenter: parent.horizontalCenter
+ anchors.verticalCenter: parent.verticalCenter
+ leftPadding: showAgainCheckbox.indicator.width + showAgainCheckbox.spacing
+ color: textColour
+ }
}
}
}
--- /dev/null
+<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"/></svg>
\ No newline at end of file
<file>Prompt.svg</file>
<file>network.svg</file>
<file>video.svg</file>
+ <file>close.svg</file>
<file>jacktrip.png</file>
<file>jacktrip white.png</file>
<file>JTOriginal.png</file>
return;
}
+ // always connect with audio device controls open
+ setCollapseDeviceControls(false);
+
m_jackTripRunning = true;
m_connectionState = QStringLiteral("Preparing audio...");
emit connectionStateChanged();
int buffer_strategy = m_audioConfigPtr->getBufferStrategy() + 1;
// adjust buffer_strategy for PLC "auto" mode menu item
if (buffer_strategy == 3) {
+ // run PLC without worker (4)
+ buffer_strategy = 4;
+ /*
+ // I don't believe this is still necessary,
+ // after splitting the input and output RtAudio streams
+ // See https://github.com/jacktrip/jacktrip/pull/1235
if (useRtAudio) {
// if same device for input and output,
            // run PLC without worker (4)
buffer_strategy = 4;
}
+ */
} else if (buffer_strategy == 5) {
buffer_strategy = 3; // run PLC with worker (3)
}
return;
}
- // special case if on create_studio screen
+ // special case if on create_studio screen:
+    // note that studio creation happens inside the web view, so the app
+    // only learns about it from the deep link join event handled here.
+    // the refreshed studio list is unlikely to include the new studio yet,
+    // so joining right away would just produce an unknown studio error.
+    // instead, we trigger a refresh and rely on it to kick off the join.
if (m_windowState == "create_studio") {
refreshStudios(0, true);
if (showDeviceSetup()) {
return;
}
- // special case if on browsing screen
- if (m_windowState == "browse") {
- setWindowState("connected");
- joinStudio();
- return;
- }
-
- if (m_windowState == "failed") {
- setWindowState("connected");
- joinStudio();
+    // special case for the "browse" and "failed" screens
+ if (m_windowState == "browse" || m_windowState == "failed") {
+ if (showDeviceSetup()) {
+ setWindowState("setup");
+ m_audioConfigPtr->startAudio();
+ } else {
+ setWindowState("connected");
+ joinStudio();
+ }
return;
}
} else if (errorMessage.startsWith(RtAudioErrorMsg)) {
if (errorMessage.length() > RtAudioErrorMsg.length() + 2) {
const QString details(errorMessage.sliced(RtAudioErrorMsg.length() + 2));
- if (details.startsWith(
- QStringLiteral("RtApiCore: the stream device was disconnected"))) {
+ if (details.contains(QStringLiteral("device was disconnected"))
+ || details.contains(
+ QStringLiteral("Unable to retrieve capture buffer"))) {
msgBox.setText(QStringLiteral("Your audio interface was disconnected."));
} else {
msgBox.setText(details);
void VirtualStudio::getServerList(bool signalRefresh, int index)
{
+ // only allow one thread to refresh at a time
QMutexLocker refreshLock(&m_refreshMutex);
if (m_refreshInProgress)
return;
m_refreshInProgress = true;
+ refreshLock.unlock();
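+    // drop the lock while the network request is in flight; it is
+    // re-acquired before m_servers is modified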
// Get the serverId of the server at the top of our screen if we know it
QString topServerId;
- if (index >= 0 && index < m_servers.count()) {
- topServerId = m_servers.at(index)->id();
+ if (index >= 0 && index < m_serverModel.count()) {
+ topServerId = m_serverModel.at(index)->id();
}
- refreshLock.unlock();
QNetworkReply* reply = m_api->getServers();
connect(
if (signalRefresh) {
emit refreshFinished(index);
}
- std::cout << "Error: " << reply->errorString().toStdString() << std::endl;
+ std::cerr << "Error: " << reply->errorString().toStdString() << std::endl;
reply->deleteLater();
- QMutexLocker getServersLock(&m_refreshMutex);
+ QMutexLocker refreshLock(&m_refreshMutex);
m_refreshInProgress = false;
return;
}
if (signalRefresh) {
emit refreshFinished(index);
}
- std::cout << "Error: Not an array" << std::endl;
- QMutexLocker locker(&m_refreshMutex);
- m_refreshInProgress = false;
- QMutexLocker getServersLock(&m_refreshMutex);
+ std::cerr << "Error: Not an array" << std::endl;
+ QMutexLocker refreshLock(&m_refreshMutex);
m_refreshInProgress = false;
return;
}
QVector<VsServerInfoPointer> pubServers;
int skippedStudios = 0;
+ QMutexLocker refreshLock(&m_refreshMutex); // protect m_servers
+ m_servers.clear();
for (int i = 0; i < servers.count(); i++) {
if (servers.at(i)[QStringLiteral("type")].toString().contains(
QStringLiteral("JackTrip"))) {
QSharedPointer<VsServerInfo> serverInfo(new VsServerInfo(this));
serverInfo->setIsAdmin(
servers.at(i)[QStringLiteral("admin")].toBool());
- QString status = servers.at(i)[QStringLiteral("status")].toString();
- bool activeStudio = status == QLatin1String("Ready");
- bool hostedStudio = servers.at(i)[QStringLiteral("managed")].toBool();
- // Only iterate through servers that we want to show
- if (!m_showSelfHosted && !hostedStudio) {
- if (activeStudio || (serverInfo->isAdmin())) {
- skippedStudios++;
- }
- continue;
- }
- if (!m_showInactive && !activeStudio) {
- if (serverInfo->isAdmin()) {
- skippedStudios++;
- }
- continue;
- }
- if (activeStudio || m_showInactive) {
- serverInfo->setName(
- servers.at(i)[QStringLiteral("name")].toString());
- serverInfo->setHost(
- servers.at(i)[QStringLiteral("serverHost")].toString());
- serverInfo->setIsManaged(
- servers.at(i)[QStringLiteral("managed")].toBool());
- serverInfo->setStatus(
- servers.at(i)[QStringLiteral("status")].toString());
- serverInfo->setPort(
- servers.at(i)[QStringLiteral("serverPort")].toInt());
- serverInfo->setIsPublic(
- servers.at(i)[QStringLiteral("public")].toBool());
- serverInfo->setRegion(
- servers.at(i)[QStringLiteral("region")].toString());
- serverInfo->setPeriod(
- servers.at(i)[QStringLiteral("period")].toInt());
- serverInfo->setSampleRate(
- servers.at(i)[QStringLiteral("sampleRate")].toInt());
- serverInfo->setQueueBuffer(
- servers.at(i)[QStringLiteral("queueBuffer")].toInt());
- serverInfo->setBannerURL(
- servers.at(i)[QStringLiteral("bannerURL")].toString());
- serverInfo->setId(servers.at(i)[QStringLiteral("id")].toString());
- serverInfo->setSessionId(
- servers.at(i)[QStringLiteral("sessionId")].toString());
- serverInfo->setInviteKey(
- servers.at(i)[QStringLiteral("inviteKey")].toString());
- serverInfo->setCloudId(
- servers.at(i)[QStringLiteral("cloudId")].toString());
- serverInfo->setEnabled(
- servers.at(i)[QStringLiteral("enabled")].toBool());
- serverInfo->setIsOwner(
- servers.at(i)[QStringLiteral("owner")].toBool());
- if (servers.at(i)[QStringLiteral("owner")].toBool()) {
+ serverInfo->setName(servers.at(i)[QStringLiteral("name")].toString());
+ serverInfo->setHost(
+ servers.at(i)[QStringLiteral("serverHost")].toString());
+ serverInfo->setIsManaged(
+ servers.at(i)[QStringLiteral("managed")].toBool());
+ serverInfo->setStatus(
+ servers.at(i)[QStringLiteral("status")].toString());
+ serverInfo->setPort(
+ servers.at(i)[QStringLiteral("serverPort")].toInt());
+ serverInfo->setIsPublic(
+ servers.at(i)[QStringLiteral("public")].toBool());
+ serverInfo->setRegion(
+ servers.at(i)[QStringLiteral("region")].toString());
+ serverInfo->setPeriod(
+ servers.at(i)[QStringLiteral("period")].toInt());
+ serverInfo->setSampleRate(
+ servers.at(i)[QStringLiteral("sampleRate")].toInt());
+ serverInfo->setQueueBuffer(
+ servers.at(i)[QStringLiteral("queueBuffer")].toInt());
+ serverInfo->setBannerURL(
+ servers.at(i)[QStringLiteral("bannerURL")].toString());
+ serverInfo->setId(servers.at(i)[QStringLiteral("id")].toString());
+ serverInfo->setSessionId(
+ servers.at(i)[QStringLiteral("sessionId")].toString());
+ serverInfo->setInviteKey(
+ servers.at(i)[QStringLiteral("inviteKey")].toString());
+ serverInfo->setCloudId(
+ servers.at(i)[QStringLiteral("cloudId")].toString());
+ serverInfo->setEnabled(
+ servers.at(i)[QStringLiteral("enabled")].toBool());
+ serverInfo->setIsOwner(
+ servers.at(i)[QStringLiteral("owner")].toBool());
+
+ // Always add servers to m_servers
+ m_servers.append(serverInfo);
+
+        // Only add the servers we want to show to the display model
+ if (serverInfo->isAdmin() || serverInfo->isOwner()) {
+ if (filterStudio(*serverInfo)) {
+ ++skippedStudios;
+ } else {
yourServers.append(serverInfo);
serverInfo->setSection(VsServerInfo::YOUR_STUDIOS);
- } else if (m_subscribedServers.contains(serverInfo->id())) {
+ }
+ } else if (m_subscribedServers.contains(serverInfo->id())) {
+ if (filterStudio(*serverInfo)) {
+ ++skippedStudios;
+ } else {
subServers.append(serverInfo);
serverInfo->setSection(VsServerInfo::SUBSCRIBED_STUDIOS);
- } else {
+ }
+ } else {
+ if (!filterStudio(*serverInfo)) {
pubServers.append(serverInfo);
serverInfo->setSection(VsServerInfo::PUBLIC_STUDIOS);
}
+            // public studios are not included in the skipped count
}
}
}
+ refreshLock.unlock();
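
Calling refreshLock.unlock() here releases m_refreshMutex as soon as m_servers is rebuilt, so the sorting and model updates that follow run outside the critical section. QMutexLocker supports early release explicitly; a small sketch of the pattern, with illustrative names:

    #include <QList>
    #include <QMutex>
    #include <QMutexLocker>
    #include <algorithm>

    QMutex mutex;
    QList<int> shared;

    void rebuild()
    {
        QMutexLocker lock(&mutex);  // guards the shared container
        QList<int> copy = shared;   // copy while the lock is held
        lock.unlock();              // release before the slow work
        std::sort(copy.begin(), copy.end());  // operates on the copy only
    }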
// sort studios in each section by name
auto serverSorter = [](VsServerInfoPointer first,
if (subServers.isEmpty()) {
m_logoSection = QStringLiteral("Public Studios");
- if (pubServers.isEmpty() && skippedStudios == 0) {
+ if (skippedStudios == 0) {
// This is a new user
setShowCreateStudio(true);
} else {
- // This is not a new user.
- // Set to false in case the studio created since refreshing.
+ // This is not a new user. One or more studios were filtered.
setShowCreateStudio(false);
}
} else {
m_logoSection = QStringLiteral("Subscribed Studios");
}
- emit logoSectionChanged();
} else {
m_logoSection = QStringLiteral("Your Studios");
- emit logoSectionChanged();
}
+ emit logoSectionChanged();
- QMutexLocker getServersLock(&m_refreshMutex);
- m_servers.clear();
- m_servers.append(yourServers);
- m_servers.append(subServers);
- m_servers.append(pubServers);
m_serverModel.clear();
- for (const VsServerInfoPointer& s : m_servers) {
+ for (const VsServerInfoPointer& s : yourServers) {
+ m_serverModel.append(s.get());
+ }
+ for (const VsServerInfoPointer& s : subServers) {
+ m_serverModel.append(s.get());
+ }
+ for (const VsServerInfoPointer& s : pubServers) {
m_serverModel.append(s.get());
}
emit serverModelChanged();
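
Note the ownership split: m_servers keeps QSharedPointer instances that own every VsServerInfo, while m_serverModel stores raw pointers for the three visible sections. The raw pointers remain valid only as long as the matching shared pointers stay in m_servers, which is why the loop above appends to m_servers unconditionally before any filtering. A reduced sketch of the pattern, with illustrative types:

    #include <QList>
    #include <QSharedPointer>

    struct Info { int id = 0; };

    QList<QSharedPointer<Info>> owners;  // owns the objects (like m_servers)
    QList<Info*> model;                  // non-owning view (like m_serverModel)

    void rebuildModel()
    {
        model.clear();
        for (const auto& s : owners)
            model.append(s.get());  // valid while 'owners' retains the pointer
    }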
int index = -1;
if (!topServerId.isEmpty()) {
- for (int i = 0; i < m_servers.count(); i++) {
- if (m_servers.at(i)->id() == topServerId) {
+ for (int i = 0; i < m_serverModel.count(); i++) {
+ if (m_serverModel.at(i)->id() == topServerId) {
index = i;
break;
}
});
}
+bool VirtualStudio::filterStudio(const VsServerInfo& serverInfo) const
+{
+    // Returns true when the studio should be filtered out of the display model
+ bool activeStudio = serverInfo.status() == QLatin1String("Ready");
+ bool hostedStudio = serverInfo.isManaged();
+ if (!m_showSelfHosted && !hostedStudio) {
+ return true;
+ }
+ if (!m_showInactive && !activeStudio) {
+ return true;
+ }
+ return false;
+}
+
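The new filterStudio() predicate replaces the inline visibility checks deleted from the loop above, so the same rules now apply uniformly to all three sections. A standalone sketch of its truth table, with the member settings passed as plain booleans for illustration:

    // managed == hosted by the service; ready == status is "Ready"
    bool shouldHideStudio(bool managed, bool ready,
                          bool showSelfHosted, bool showInactive)
    {
        if (!showSelfHosted && !managed)
            return true;   // hide self-hosted studios
        if (!showInactive && !ready)
            return true;   // hide inactive studios
        return false;      // studio stays visible
    }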
void VirtualStudio::getSubscriptions()
{
if (m_userId.isEmpty()) {
private:
void resetState();
void getServerList(bool signalRefresh = false, int index = -1);
+ bool filterStudio(const VsServerInfo& serverInfo) const;
void getSubscriptions();
void getRegions();
void getUserMetadata();
{
AudioInterface* ifPtr = nullptr;
-#if defined(__unix__)
- AudioInterface::setPipewireLatency(getBufferSize(), m_sampleRate);
-#endif
-
// Create AudioInterface Client Object
if (isBackendAvailable<AudioInterfaceMode::ALL>() && jackIsAvailable()) {
        // all backends are available
setBufferSize(ifPtr->getBufferSizeInSamples());
}
- std::cout << "The Sampling Rate is: " << m_sampleRate << std::endl;
+ std::cout << "The Sampling Rate is: " << ifPtr->getSampleRate() << std::endl;
std::cout << gPrintSeparator << std::endl;
int AudioBufferSizeInBytes = ifPtr->getBufferSizeInSamples() * sizeof(sample_t);
std::cout << "The Audio Buffer Size is: " << ifPtr->getBufferSizeInSamples()
ifPtr = new JackAudioInterface(inputChans, outputChans, m_audioBitResolution,
jackTripPtr != nullptr, jackTripPtr);
ifPtr->setClientName(QStringLiteral("JackTrip"));
+#if defined(__unix__)
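+        // use the session's sample rate when available; 44.1 kHz otherwise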
+ AudioInterface::setPipewireLatency(
+ getBufferSize(),
+ jackTripPtr == nullptr ? 44100 : jackTripPtr->getSampleRate());
+#endif
ifPtr->setup(true);
-
- m_sampleRate = ifPtr->getSampleRate();
}
#endif
return ifPtr;
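
Moving the setPipewireLatency() call after the JACK interface is created lets it use the session's sample rate rather than the cached m_sampleRate member this change removes; when no JackTrip session exists yet, 44100 Hz is used as a fallback. The helper's implementation is not part of this diff; a plausible sketch, assuming it works through the PIPEWIRE_LATENCY environment variable that PipeWire's JACK compatibility layer honors ("<quantum>/<rate>"):

    #include <QByteArray>
    #include <QtGlobal>

    // Hypothetical sketch; the real AudioInterface::setPipewireLatency()
    // is not shown in this diff.
    static void setPipewireLatencyHint(unsigned bufferSize, unsigned sampleRate)
    {
        const QByteArray value = QByteArray::number(bufferSize) + '/'
                                 + QByteArray::number(sampleRate);
        qputenv("PIPEWIRE_LATENCY", value);  // e.g. "128/48000"
    }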
inputChans, outputChans,
static_cast<AudioInterface::inputMixModeT>(getInputMixMode()),
m_audioBitResolution, jackTripPtr != nullptr, jackTripPtr);
- ifPtr->setSampleRate(m_sampleRate);
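+    // derive the rate from the session; fall back to 44.1 kHz before one exists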
+ ifPtr->setSampleRate(jackTripPtr == nullptr ? 44100 : jackTripPtr->getSampleRate());
ifPtr->setInputDevice(getInputDevice().toStdString());
ifPtr->setOutputDevice(getOutputDevice().toStdString());
ifPtr->setBufferSizeInSamples(getBufferSize());
if (!devices.empty())
static_cast<RtAudioInterface*>(ifPtr)->setRtAudioDevices(devices);
+#if defined(__unix__)
+ AudioInterface::setPipewireLatency(getBufferSize(), ifPtr->getSampleRate());
+#endif
+
// Note: setup might change the number of channels and/or buffer size
ifPtr->setup(true);
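
In the RtAudio path the latency hint is likewise set only after the device's sample rate is known. Together with the removal of the m_sampleRate member below, the rate becomes a value derived from the JackTrip session on demand instead of cached state that could go stale when devices or studios change. The recurring ternary reads as this hypothetical helper (Session stands in for JackTrip):

    struct Session { unsigned getSampleRate() const { return 48000; } };

    constexpr unsigned kFallbackSampleRate = 44100;  // no session yet

    unsigned sessionSampleRate(const Session* session)
    {
        return session ? session->getSampleRate() : kFallbackSampleRate;
    }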
float m_inMultiplier = 1.0;
float m_outMultiplier = 1.0;
float m_monMultiplier = 0;
- uint32_t m_sampleRate = gDefaultSampleRate;
QString m_inputDevice;
QString m_outputDevice;
#ifdef RT_AUDIO
if (useRtAudio) {
m_jackTrip->setAudiointerfaceMode(JackTrip::RTAUDIO);
- m_jackTrip->setSampleRate(studioInfo->sampleRate());
m_jackTrip->setAudioBufferSizeInSamples(bufferSize);
m_jackTrip->setInputDevice(input);
m_jackTrip->setOutputDevice(output);
}
#endif
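+    // the studio's sample rate applies regardless of audio backend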
+ m_jackTrip->setSampleRate(studioInfo->sampleRate());
int bindPort = selectBindPort();
if (bindPort == 0) {
return 0;
#include "AudioInterface.h"
-constexpr const char* const gVersion = "2.1.0"; ///< JackTrip version
+constexpr const char* const gVersion = "2.2.0"; ///< JackTrip version
//*******************************************************************************
/// \name Default Values
--- /dev/null
+--- a/RtAudio.cpp 2024-01-11 13:04:29.148565300 -0800
++++ b/RtAudio.cpp 2024-01-11 13:04:42.305228600 -0800
+@@ -1981,7 +1981,7 @@
+ }
+ }
+
+- if ( handle->disconnectListenerAdded[0] ) {
++ if ( handle->disconnectListenerAdded[1] ) {
+ property.mSelector = kAudioDevicePropertyDeviceIsAlive;
+ if (AudioObjectRemovePropertyListener( handle->id[1], &property, streamDisconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
+ errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
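
This bundled patch fixes an index mismatch in upstream RtAudio's RtApiCore::closeStream(): the guard tested the output flag (index 0) while the body removed the disconnect listener registered on the input device (index 1), so the input listener could be removed without ever having been added, or leak when it had. The convention the one-character fix relies on, assuming RtAudio's StreamMode enum:

    // RtAudio's StreamMode: OUTPUT == 0, INPUT == 1
    enum StreamMode { OUTPUT = 0, INPUT = 1 };
    bool disconnectListenerAdded[2];  // one flag per direction
    // before: if (disconnectListenerAdded[OUTPUT]) remove listener on id[INPUT]
    // after:  if (disconnectListenerAdded[INPUT])  remove listener on id[INPUT]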
source_url = https://github.com/thestk/rtaudio/archive/refs/tags/6.0.1.tar.gz
source_filename = 6.0.1.tar.gz
source_hash = 7206c8b6cee43b474f43d64988fefaadfdcfc4264ed38d8de5f5d0e6ddb0a123
+diff_files = rtaudio-remove-input-disconnect-listener.patch
[provide]
dependency_names = rtaudio