New upstream version 2.2.0+ds
authorIOhannes m zmölnig (Debian/GNU) <umlaeute@debian.org>
Thu, 25 Jan 2024 22:51:16 +0000 (23:51 +0100)
committerIOhannes m zmölnig (Debian/GNU) <umlaeute@debian.org>
Thu, 25 Jan 2024 22:51:16 +0000 (23:51 +0100)
36 files changed:
build-aux/flatpak/org.jacktrip.JackTrip.json
docs/changelog.yml
meson_options.txt
releases/edge/mac-manifests.json
releases/edge/win-manifests.json
releases/stable/linux-manifests.json
releases/stable/mac-manifests.json
releases/stable/win-manifests.json
src/AudioInterface.cpp
src/AudioInterface.h
src/JackAudioInterface.cpp
src/JackAudioInterface.h
src/Regulator.cpp
src/Regulator.h
src/RtAudioInterface.cpp
src/RtAudioInterface.h
src/Settings.h
src/gui/AudioSettings.qml
src/gui/ChangeDevices.qml
src/gui/Connected.qml
src/gui/DeviceControls.qml
src/gui/DeviceControlsGroup.qml
src/gui/DeviceWarning.qml
src/gui/FeedbackSurvey.qml
src/gui/Settings.qml
src/gui/Setup.qml
src/gui/close.svg [new file with mode: 0644]
src/gui/qjacktrip.qrc
src/gui/virtualstudio.cpp
src/gui/virtualstudio.h
src/gui/vsAudio.cpp
src/gui/vsAudio.h
src/gui/vsDevice.cpp
src/jacktrip_globals.h
subprojects/packagefiles/rtaudio-remove-input-disconnect-listener.patch [new file with mode: 0644]
subprojects/rtaudio.wrap

index bfbfef09208ae248062d80ae2ca904e694c96383..e4348c02b4224e3dff1a3984dbb30cb48096b938 100644 (file)
@@ -1,10 +1,10 @@
 {
     "app-id": "org.jacktrip.JackTrip",
     "runtime": "org.kde.Platform",
-    "runtime-version": "6.4",
+    "runtime-version": "6.6",
     "sdk": "org.kde.Sdk",
     "base": "io.qt.qtwebengine.BaseApp",
-    "base-version": "6.4",
+    "base-version": "6.6",
     "command": "jacktrip",
     "finish-args": [
         "--share=ipc",
index 1caca7b1e7e2357c8b09b545e0843ef3cbe9b147..5e28460d42730b6482b562a1dc1c6a1397fae398 100644 (file)
@@ -1,3 +1,13 @@
+- Version: "2.2.0"
+  Date: 2024-01-22
+  Description:
+  - (updated) Improved support for different input and output devices
+  - (updated) Various latency improvements for packet loss concealment
+  - (updated) VS Mode make it easier to dismiss the user feedback dialog
+  - (updated) VS Mode error message for disconnected audio interfaces
+  - (fixed) VS Mode broken deeplinks when studio doesn't match filters
+  - (fixed) VS Mode refused to connect to studios not 48khz
+  - (fixed) VS Mode showed wrong devices selected when connected
 - Version: "2.1.0"
   Date: 2023-11-06
   Description:
index ca4f707251719f7963f12b73d020307ceab4e5fa..e7daaf03e8eb1317eb60a313226ee240e95848d3 100644 (file)
@@ -1,12 +1,12 @@
-option('wair', type : 'boolean', value : 'false', description: 'WAIR')
+option('wair', type : 'boolean', value : false, description: 'WAIR')
 option('rtaudio', type : 'feature', value : 'auto', description: 'Build with RtAudio Backend')
 option('jack', type : 'feature', value : 'auto', description: 'Build with JACK Backend')
-option('weakjack', type : 'boolean', value : 'false', description: 'Weak link JACK library')
-option('nogui', type : 'boolean', value : 'false', description: 'Build without graphical user interface')
-option('novs', type : 'boolean', value : 'false', description: 'Build without Virtual Studio support')
-option('vsftux', type : 'boolean', value : 'false', description: 'Build with Virtual Studio first launch experience')
-option('noupdater', type : 'boolean', value : 'false', description: 'Build without auto-update support')
-option('nofeedback', type : 'boolean', value : 'false', description: 'Build without feedback detection')
+option('weakjack', type : 'boolean', value : false, description: 'Weak link JACK library')
+option('nogui', type : 'boolean', value : false, description: 'Build without graphical user interface')
+option('novs', type : 'boolean', value : false, description: 'Build without Virtual Studio support')
+option('vsftux', type : 'boolean', value : false, description: 'Build with Virtual Studio first launch experience')
+option('noupdater', type : 'boolean', value : false, description: 'Build without auto-update support')
+option('nofeedback', type : 'boolean', value : false, description: 'Build without feedback detection')
 option('profile', type: 'combo', choices: ['default', 'development'], value: 'default', description: 'Choose build profile / Sets desktop id accordingly')
 option('qtversion', type : 'combo', choices: ['', '5', '6'], description: 'Choose to build with either Qt5 or Qt6')
 option('buildinfo', type : 'string', value : '', yield : true, description: 'Additional info used to describe the build')
\ No newline at end of file
index b7b759e13f771088e47c0b9b45715aafaede878d..09fa2557703c35d357b1decb2b8740cde88109de 100644 (file)
@@ -1,6 +1,26 @@
 {
   "app_name": "JackTrip",
   "releases": [
+    {
+      "version": "2.1.0",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+      "download": {
+        "date": "2023-11-06T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-macOS-x64-signed-installer.pkg",
+        "downloadSize": "177373751",
+        "sha256": "e7ffb56b99f25de7c71774e6e6484c1e400ebe2fa05b9618695030e83a5de9a2"
+      }
+    },
+    {
+      "version": "2.1.0-beta1",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0-beta1",
+      "download": {
+        "date": "2023-10-31T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-beta1-macOS-x64-signed-installer.pkg",
+        "downloadSize": "177365464",
+        "sha256": "46ab83844671028a27b59b09a4e7eec1398b772f85820c1213f330d35c2ceba9"
+      }
+    },
     {
       "version": "2.0.2",
       "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
index 58f7b0a81b54ffee0082407294d4681c99801425..17eb017aef31367510005b6257fb2dc86dfabcdf 100644 (file)
@@ -1,6 +1,26 @@
 {
   "app_name": "JackTrip",
   "releases": [
+    {
+      "version": "2.1.0",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+      "download": {
+        "date": "2023-11-06T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Windows-x64-signed-installer.msi",
+        "downloadSize": "108511232",
+        "sha256": "cd5b4735421a484bf83635f07653755e56b095c785f021eedaa4ca2d4132dd7f"
+      }
+    },
+    {
+      "version": "2.1.0-beta1",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0-beta1",
+      "download": {
+        "date": "2023-10-31T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-beta1-Windows-x64-signed-installer.msi",
+        "downloadSize": "108511232",
+        "sha256": "d52784050fdd9876d44bc3fddd1b70b93065ab4c6b67076dcbfe42a098d73447"
+      }
+    },
     {
       "version": "2.0.2",
       "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
index c21757e3f80bf1d7ce39df172b908c6f3f545c36..26ec828e25d945edac058c141d5ebaae8d8c6c7e 100644 (file)
@@ -1,6 +1,16 @@
 {
   "app_name": "JackTrip",
   "releases": [
+    {
+      "version": "2.1.0",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+      "download": {
+        "date": "2023-11-06T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Linux-x64-binary.zip",
+        "downloadSize": "1239221",
+        "sha256": "1f990a9d4e7874d5129f287eee3ace4881130c23531be9ca816a9cc01df17379"
+      }
+    },
     {
       "version": "2.0.2",
       "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
index 569c766852ba652026d96a46c44c7f49b7e9ec63..78ddfd405ea148daad2d0641bc9455f717ad10e5 100644 (file)
@@ -1,6 +1,16 @@
 {
   "app_name": "JackTrip",
   "releases": [
+    {
+      "version": "2.1.0",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+      "download": {
+        "date": "2023-11-06T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-macOS-x64-signed-installer.pkg",
+        "downloadSize": "177373751",
+        "sha256": "e7ffb56b99f25de7c71774e6e6484c1e400ebe2fa05b9618695030e83a5de9a2"
+      }
+    },
     {
       "version": "2.0.2",
       "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
index 41462eb2e23735e63c4d5f1fbc9d7fd0cf47b61b..3e34349c259325ac7fa8458ac52aa5a8939b633e 100644 (file)
@@ -1,6 +1,16 @@
 {
   "app_name": "JackTrip",
   "releases": [
+    {
+      "version": "2.1.0",
+      "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.1.0",
+      "download": {
+        "date": "2023-11-06T00:00:00Z",
+        "url": "https://files.jacktrip.org/app-builds/JackTrip-v2.1.0-Windows-x64-signed-installer.msi",
+        "downloadSize": "108511232",
+        "sha256": "cd5b4735421a484bf83635f07653755e56b095c785f021eedaa4ca2d4132dd7f"
+      }
+    },
     {
       "version": "2.0.2",
       "changelog": "Full changelog at https://github.com/jacktrip/jacktrip/releases/tag/v2.0.2",
index af36cfe3ff6cc141c29b0f4d93266d85e7c01380..1d7dc81cf2eeff2e0177f6a033313b3891e11165 100644 (file)
@@ -66,61 +66,16 @@ AudioInterface::AudioInterface(QVarLengthArray<int> InputChans,
     , mBitResolutionMode(AudioBitResolution)
     , mSampleRate(gDefaultSampleRate)
     , mBufferSizeInSamples(gDefaultBufferSizeInSamples)
+    , mMonitorQueuePtr(NULL)
     , mAudioInputPacket(NULL)
     , mAudioOutputPacket(NULL)
     , mLoopBack(false)
     , mProcessWithNetwork(processWithNetwork)
+    , mMonitorStarted(false)
     , mJackTrip(jacktrip)
     , mInputMixMode(InputMixMode)
     , mProcessingAudio(false)
 {
-#ifndef WAIR
-    // cc
-    // Initialize and assign memory for ProcessPlugins Buffers
-    int monitorChans = int(std::min<size_t>(mInputChans.size(), mOutputChans.size()));
-    mInProcessBuffer.resize(mInputChans.size());
-    mOutProcessBuffer.resize(mOutputChans.size());
-    mMonProcessBuffer.resize(monitorChans);
-    // Set pointer to NULL
-    for (int i = 0; i < mInProcessBuffer.size(); i++) {
-        mInProcessBuffer[i] = NULL;
-    }
-    for (int i = 0; i < mOutProcessBuffer.size(); i++) {
-        mOutProcessBuffer[i] = NULL;
-    }
-    for (int i = 0; i < monitorChans; i++) {
-        mMonProcessBuffer[i] = NULL;
-    }
-#else   // WAIR
-    int iCnt =
-        (mInputChans.size() > mNumNetRevChans) ? mInputChans.size() : mNumNetRevChans;
-    int oCnt =
-        (mOutputChans.size() > mNumNetRevChans) ? mOutputChans.size() : mNumNetRevChans;
-    int aCnt = (mNumNetRevChans) ? mInputChans.size() : 0;
-    int mCnt = std::min<int>(iCnt, oCnt);
-    for (int i = 0; i < iCnt; i++) {
-        mInProcessBuffer[i] = NULL;
-    }
-    for (int i = 0; i < oCnt; i++) {
-        mOutProcessBuffer[i] = NULL;
-    }
-    for (int i = 0; i < mCnt; i++) {
-        mMonProcessBuffer[i] = NULL;
-    }
-    for (int i = 0; i < aCnt; i++) {
-        mAPInBuffer[i] = NULL;
-    }
-#endif  // endwhere
-
-    mInBufCopy.resize(mInputChans.size());
-    for (int i = 0; i < mInputChans.size(); i++) {
-        mInBufCopy[i] =
-            new sample_t[MAX_AUDIO_BUFFER_SIZE];  // required for processing audio input
-    }
-
-    // Not used in this class but may be needed by subclasses
-    mNumInChans  = mInputChans.size();
-    mNumOutChans = mOutputChans.size();
 }
 
 //*******************************************************************************
@@ -128,27 +83,14 @@ AudioInterface::~AudioInterface()
 {
     delete[] mAudioInputPacket;
     delete[] mAudioOutputPacket;
-#ifndef WAIR  // NOT WAIR:
     for (int i = 0; i < mInProcessBuffer.size(); i++) {
         delete[] mInProcessBuffer[i];
     }
-
     for (int i = 0; i < mOutProcessBuffer.size(); i++) {
         delete[] mOutProcessBuffer[i];
     }
-    for (int i = 0; i < mMonProcessBuffer.size(); i++) {
-        delete[] mMonProcessBuffer[i];
-    }
-#else   // WAIR
-    for (int i = 0; i < mInProcessBuffer.size(); i++) {
-        delete[] mInProcessBuffer[i];
-    }
-    for (int i = 0; i < mOutProcessBuffer.size(); i++) {
-        delete[] mOutProcessBuffer[i];
-    }
-    for (int i = 0; i < mMonProcessBuffer.size(); i++) {
-        delete[] mMonProcessBuffer[i];
-    }
+    delete mMonitorQueuePtr;
+#ifdef WAIR  // NOT WAIR:
     for (int i = 0; i < mAPInBuffer.size(); i++) {
         delete[] mAPInBuffer[i];
     }
@@ -165,30 +107,16 @@ AudioInterface::~AudioInterface()
         i->disconnect();
         delete i;
     }
-    for (int i = 0; i < mInBufCopy.size(); i++) {
-        delete[] mInBufCopy[i];
-    }
 }
 
 //*******************************************************************************
 void AudioInterface::setup(bool /*verbose*/)
 {
-    int nChansIn  = mInputChans.size();
-    int nChansOut = mOutputChans.size();
-    int nChansMon = std::min<int>(
-        nChansIn, nChansOut);  // Note: Should be 2 when mixing stereo-to-mono
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
-    }
-    if (inputMixMode == MONO) {
-        nChansMon = nChansOut;
-    }
     // Allocate buffer memory to read and write
     mSizeInBytesPerChannel = getSizeInBytesPerChannel();
-
-    int size_audio_input  = int(mSizeInBytesPerChannel * nChansIn);
-    int size_audio_output = int(mSizeInBytesPerChannel * nChansOut);
+    int nframes            = getBufferSizeInSamples();
+    int size_audio_input   = int(mSizeInBytesPerChannel * mInputChans.size());
+    int size_audio_output  = int(mSizeInBytesPerChannel * mOutputChans.size());
 #ifdef WAIR               // WAIR
     if (mNumNetRevChans)  // else don't change sizes
     {
@@ -196,70 +124,48 @@ void AudioInterface::setup(bool /*verbose*/)
         size_audio_output = mSizeInBytesPerChannel * mNumNetRevChans;
     }
 #endif  // endwhere
-    mAudioInputPacket  = new int8_t[size_audio_input];
-    mAudioOutputPacket = new int8_t[size_audio_output];
+    const size_t audioInputPacketSize = std::max<size_t>(
+        size_audio_input, mInputChans.size() * sizeof(sample_t) * nframes);
+    const size_t audioOutputPacketSize = std::max<size_t>(
+        size_audio_output, mOutputChans.size() * sizeof(sample_t) * nframes);
+    mAudioInputPacket  = new int8_t[audioInputPacketSize];
+    mAudioOutputPacket = new int8_t[audioOutputPacketSize];
 
     // Initialize and assign memory for ProcessPlugins Buffers
 #ifdef WAIR  // WAIR
     if (mNumNetRevChans) {
         mInProcessBuffer.resize(mNumNetRevChans);
         mOutProcessBuffer.resize(mNumNetRevChans);
-        mMonProcessBuffer.resize(mNumNetRevChans);
-        mAPInBuffer.resize(nChansIn);
+        mAPInBuffer.resize(mInputChans.size());
         mNetInBuffer.resize(mNumNetRevChans);
+        for (int i = 0; i < mAPInBuffer.size(); i++) {
+            mAPInBuffer[i] = new sample_t[nframes];
+            // set memory to 0
+            std::memset(mAPInBuffer[i], 0, sizeof(sample_t) * nframes);
+        }
+        for (int i = 0; i < mNumNetRevChans; i++) {
+            mNetInBuffer[i] = new sample_t[nframes];
+            // set memory to 0
+            std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * nframes);
+        }
     } else  // don't change sizes
 #endif      // endwhere
     {
-        mInProcessBuffer.resize(nChansIn);
-        mOutProcessBuffer.resize(nChansOut);
-        mMonProcessBuffer.resize(nChansMon);
+        mInProcessBuffer.resize(mInputChans.size());
+        mOutProcessBuffer.resize(mOutputChans.size());
+        mMonitorQueuePtr = new WaitFreeFrameBuffer<64>(audioInputPacketSize);
     }
 
-    int nframes = getBufferSizeInSamples();
-
-#ifndef WAIR  // NOT WAIR:
-    for (int i = 0; i < nChansIn; i++) {
-        mInProcessBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mInProcessBuffer[i], 0, sizeof(sample_t) * nframes);
-    }
-    for (int i = 0; i < nChansOut; i++) {
-        mOutProcessBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * nframes);
-    }
-    for (int i = 0; i < nChansMon; i++) {
-        mMonProcessBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mMonProcessBuffer[i], 0, sizeof(sample_t) * nframes);
-    }
-#else   // WAIR
-    for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansIn); i++) {
+    for (int i = 0; i < mInputChans.size(); i++) {
         mInProcessBuffer[i] = new sample_t[nframes];
         // set memory to 0
         std::memset(mInProcessBuffer[i], 0, sizeof(sample_t) * nframes);
     }
-    for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansOut); i++) {
+    for (int i = 0; i < mOutputChans.size(); i++) {
         mOutProcessBuffer[i] = new sample_t[nframes];
         // set memory to 0
         std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * nframes);
     }
-    for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansMon); i++) {
-        mMonProcessBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mMonitorProcess[i], 0, sizeof(sample_t) * nframes);
-    }
-    for (int i = 0; i < ((mNumNetRevChans) ? nChansIn : 0); i++) {
-        mAPInBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mAPInBuffer[i], 0, sizeof(sample_t) * nframes);
-    }
-    for (int i = 0; i < mNumNetRevChans; i++) {
-        mNetInBuffer[i] = new sample_t[nframes];
-        // set memory to 0
-        std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * nframes);
-    }
-#endif  // endwhere
 }
 
 //*******************************************************************************
@@ -273,21 +179,64 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
                               QVarLengthArray<sample_t*>& out_buffer,
                               unsigned int n_frames)
 {
-    int nChansIn  = mInputChans.size();
-    int nChansOut = mOutputChans.size();
-    int nChansMon = std::min<int>(
-        nChansIn, nChansOut);  // Note: Should be 2 when mixing stereo-to-mono
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
+    this->audioInputCallback(in_buffer, n_frames);
+    this->audioOutputCallback(out_buffer, n_frames);
+}
+
+//*******************************************************************************
+void AudioInterface::audioInputCallback(QVarLengthArray<sample_t*>& in_buffer,
+                                        unsigned int n_frames)
+{
+    // in_buffer is "in" from local audio hardware
+    if (getBufferSizeInSamples() < n_frames) {  // allocated in constructor above
+        std::cerr << "*** AudioInterface::audioInputCallback n_frames = " << n_frames
+                  << " larger than expected = " << getBufferSizeInSamples() << "\n";
+        exit(1);
     }
-    if (inputMixMode == MONO) {
-        nChansMon = nChansOut;
+
+#ifndef WAIR
+    if (mMonitorQueuePtr != nullptr && mProcessPluginsToMonitor.size() > 0) {
+        // copy audio input to monitor queue
+        for (int i = 0; i < mInputChans.size(); i++) {
+            int8_t* sample_ptr = mAudioInputPacket + (i * sizeof(sample_t) * n_frames);
+            std::memcpy(sample_ptr, in_buffer[i], sizeof(sample_t) * n_frames);
+        }
+        mMonitorQueuePtr->push(mAudioInputPacket);
     }
-    // Allocate the Process Callback
-    //-------------------------------------------------------------------
+#endif  // not WAIR
+
+    // process incoming signal from audio interface using process plugins
+    for (auto* p : qAsConst(mProcessPluginsToNetwork)) {
+        if (p->getInited()) {
+            p->compute(n_frames, in_buffer.data(), in_buffer.data());
+        }
+    }
+
+    // add audio testing impulse, if enabled
+    if (mAudioTesterP && mAudioTesterP->getEnabled()) {
+        mAudioTesterP->writeImpulse(
+            in_buffer,
+            n_frames);  // writes last channel of in_buffer with test impulse
+    }
+
+    // send the final signal to the network
+    if (mProcessWithNetwork) {
+        computeProcessToNetwork(in_buffer, n_frames);
+    }
+}
+
+//*******************************************************************************
+void AudioInterface::audioOutputCallback(QVarLengthArray<sample_t*>& out_buffer,
+                                         unsigned int n_frames)
+{
+    // in_buffer is "in" from local audio hardware
+    if (getBufferSizeInSamples() < n_frames) {  // allocated in constructor above
+        std::cerr << "*** AudioInterface::audioOutputCallback n_frames = " << n_frames
+                  << " larger than expected = " << getBufferSizeInSamples() << "\n";
+        exit(1);
+    }
+
     // 1) First, process incoming packets
-    // ----------------------------------
 
 #ifdef WAIR  // WAIR
     //    qDebug() << "--" << mProcessPluginsFromNetwork.size();
@@ -295,30 +244,24 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
 #define COMBDSP 1  // client
 #define APDSP   0  // client
 #define DCBDSP  0  // server
-    for (int i = 0; i < mNumNetRevChans; i++) {
+    for (int i = 0; i < mNetInBuffer.size(); i++) {
         std::memset(mNetInBuffer[i], 0, sizeof(sample_t) * n_frames);
     }
 #endif  // endwhere
 
     // ==== RECEIVE AUDIO CHANNELS FROM NETWORK ====
+    // out_buffer is from the network and goes "out" to local audio hardware
     if (mProcessWithNetwork) {
         computeProcessFromNetwork(out_buffer, n_frames);
     }
     // =============================================
 
-    // out_buffer is from the network and goes "out" to local audio
-    // hardware via JACK:
-
     // mAudioTesterP will be nullptr for hub server's JackTripWorker instances
     if (mAudioTesterP && mAudioTesterP->getEnabled()) {
         mAudioTesterP->lookForReturnPulse(out_buffer, n_frames);
     }
 
-#ifdef WAIR  // WAIR
-    // nib16 result now in mNetInBuffer
-#endif  // endwhere
-
-    // 2) Dynamically allocate ProcessPlugin processes
+    // apply process plugins to the signal
     // -----------------------------------------------
     // The processing will be done in order of allocation
     /// \todo Implement for more than one process plugin, now it just works propertely
@@ -330,7 +273,47 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
             p->compute(n_frames, out_buffer.data(), out_buffer.data());
         }
     }
-#else   // WAIR:
+
+    if (mMonitorQueuePtr != nullptr && mProcessPluginsToMonitor.size() > 0) {
+        // mix in the monitor signal
+        // note that using memory_order_acquire ensures all data written to the buffers
+        // will be also available be available to this thread before read
+        std::memset(mAudioOutputPacket, 0,
+                    sizeof(sample_t) * n_frames * getNumInputChannels());
+        if (mMonitorStarted) {
+            mMonitorQueuePtr->pop(mAudioOutputPacket);
+        } else {
+            // drain the monitor queue to minimize latency
+            while (mMonitorQueuePtr->pop(mAudioOutputPacket)) {}
+            mMonitorStarted = true;
+        }
+        for (int i = 0; i < getNumOutputChannels(); i++) {
+            // if using mix-to-mono, in_buffer[0] should already contain the mixed
+            // audio, so copy it to the monitor buffer. See RtAudioInterface.cpp
+
+            // likewise if using mono, we simply copy the input to every monitor
+            // channel
+            int8_t* sample_ptr = mAudioOutputPacket;
+            if (i > 0 && getNumInputChannels() > i
+                && mInputMixMode == AudioInterface::STEREO) {
+                // otherwise, copy each channel individually
+                sample_ptr += (i * sizeof(sample_t) * n_frames);
+            }
+            std::memcpy(mOutProcessBuffer[i], sample_ptr, sizeof(sample_t) * n_frames);
+        }
+        for (int i = 0; i < mProcessPluginsToMonitor.size(); i++) {
+            ProcessPlugin* p = mProcessPluginsToMonitor[i];
+            if (p->getInited()) {
+                // note: for monitor plugins, the output is out_buffer (to the speakers)
+                p->compute(n_frames, mOutProcessBuffer.data(), out_buffer.data());
+            }
+        }
+    }
+
+#else  // WAIR:
+    // nib16 result now in mNetInBuffer
+    int nChansIn  = mInputChans.size();
+    int nChansOut = mOutputChans.size();
     for (int i = 0; i < ((mNumNetRevChans) ? mNumNetRevChans : nChansOut); i++) {
         std::memset(mOutProcessBuffer[i], 0, sizeof(sample_t) * n_frames);
     }
@@ -352,81 +335,9 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
                                                      mOutProcessBuffer.data());
     }
     // compute cob16
-#endif  // endwhere
 
-    // 3) Send packets to network:
-    // mAudioTesterP will be nullptr for hub server's JackTripWorker instances:
-    bool audioTesting = (mAudioTesterP && mAudioTesterP->getEnabled());
-    int nop = mProcessPluginsToNetwork.size();  // number of OUTGOING processing modules
-    if (nop > 0 || audioTesting
-        || mProcessPluginsToMonitor.size()
-               > 0) {  // cannot modify in_buffer, so make a copy
-        // in_buffer is "in" from local audio hardware via JACK
-        if (mInBufCopy.size() < nChansIn) {  // created in constructor above
-            std::cerr << "*** AudioInterface.cpp: Number of Input Channels changed - "
-                         "insufficient room reserved\n";
-            exit(1);
-        }
-        if (MAX_AUDIO_BUFFER_SIZE < n_frames) {  // allocated in constructor above
-            std::cerr << "*** AudioInterface.cpp: n_frames = " << n_frames
-                      << " larger than expected max = " << MAX_AUDIO_BUFFER_SIZE << "\n";
-            exit(1);
-        }
-        for (int i = 0; i < nChansIn; i++) {
-            std::memcpy(mInBufCopy[i], in_buffer[i], sizeof(sample_t) * n_frames);
-        }
-        for (int i = 0; i < nop; i++) {
-            // process all outgoing channels with ProcessPlugins:
-            ProcessPlugin* p = mProcessPluginsToNetwork[i];
-            if (p->getInited()) {
-                p->compute(n_frames, mInBufCopy.data(), mInBufCopy.data());
-            }
-        }
-
-        for (int i = 0; i < nChansMon; i++) {
-            if ((mInputChans.size() == 2 && mInputMixMode == AudioInterface::MIXTOMONO)
-                || (mInputMixMode == AudioInterface::MONO)) {
-                // if using mix-to-mono, in_buffer[0] should already contain the mixed
-                // audio, so copy it to the monitor buffer. See RtAudioInterface.cpp
-
-                // likewise if using mono, we simply copy the input to every monitor
-                // channel
-                std::memcpy(mMonProcessBuffer[i], in_buffer[0],
-                            sizeof(sample_t) * n_frames);
-            } else {
-                // otherwise, copy each channel individually
-                std::memcpy(mMonProcessBuffer[i], in_buffer[i],
-                            sizeof(sample_t) * n_frames);
-            }
-        }
-        for (int i = 0; i < mProcessPluginsToMonitor.size(); i++) {
-            ProcessPlugin* p = mProcessPluginsToMonitor[i];
-            if (p->getInited()) {
-                // note: for monitor plugins, the output is out_buffer (to the speakers)
-                p->compute(n_frames, mMonProcessBuffer.data(), out_buffer.data());
-            }
-        }
-
-        if (audioTesting) {
-            mAudioTesterP->writeImpulse(
-                mInBufCopy,
-                n_frames);  // writes last channel of mInBufCopy with test impulse
-        }
-        if (mProcessWithNetwork) {
-            computeProcessToNetwork(mInBufCopy, n_frames);
-        }
-    } else {  // copy saved if no plugins and no audio testing in progress:
-        if (mProcessWithNetwork) {
-            computeProcessToNetwork(
-                in_buffer, n_frames);  // send processed input audio to network - OUTGOING
-        }
-    }
-
-#ifdef WAIR  // WAIR
     // aib2 + cob16 to nob16
-#endif  // endwhere
 
-#ifdef WAIR               // WAIR
     if (mNumNetRevChans)  // else not wair, so skip all this
     {
 #define AP
@@ -442,7 +353,7 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
                 mix_sample[j] += tmp_sample[j];
             }
         }  // nib6 to aob2
-#else      // AP
+#else  // AP
 
         // output through all-pass cascade
         // AP2 is 2 channel, mixes inputs to mono, then splits to two parallel AP chains
@@ -478,43 +389,26 @@ void AudioInterface::callback(QVarLengthArray<sample_t*>& in_buffer,
 #endif  // AP
     }
 #endif  // endwhere
-
-    ///************PROTOTYPE FOR CELT**************************
-    ///********************************************************
-    /*
-  CELTMode* mode;
-  int* error;
-  mode = celt_mode_create(48000, 2, 64, error);
-  */
-    // celt_mode_create(48000, 2, 64, NULL);
-    // unsigned char* compressed;
-    // CELTEncoder* celtEncoder;
-    // celt_encode_float(celtEncoder, mInBuffer, NULL, compressed, );
-
-    ///********************************************************
-    ///********************************************************
 }
 
 //*******************************************************************************
 void AudioInterface::broadcastCallback(QVarLengthArray<sample_t*>& mon_buffer,
                                        unsigned int n_frames)
 {
-    int nChansOut = mOutputChans.size();
-
     /// \todo cast *mInBuffer[i] to the bit resolution
     // Output Process (from NETWORK to JACK)
     // ----------------------------------------------------------------
     // Read Audio buffer from RingBuffer (read from incoming packets)
     mJackTrip->receiveBroadcastPacket(mAudioOutputPacket);
     // Extract separate channels to send to Jack
-    for (int i = 0; i < nChansOut; i++) {
+    for (int i = 0; i < mOutputChans.size(); i++) {
         sample_t* tmp_sample = mon_buffer[i];  // sample buffer for channel i
         for (unsigned int j = 0; j < n_frames; j++) {
             // Change the bit resolution on each sample
             fromBitToSampleConversion(
                 // use interleaved channel layout
                 //&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
-                &mAudioOutputPacket[(j * mBitResolutionMode * nChansOut)
+                &mAudioOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
                                     + (i * mBitResolutionMode)],
                 &tmp_sample[j], mBitResolutionMode);
         }
@@ -528,8 +422,6 @@ void AudioInterface::broadcastCallback(QVarLengthArray<sample_t*>& mon_buffer,
 void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_buffer,
                                                unsigned int n_frames)
 {
-    int nChansOut = mOutputChans.size();
-
     /// \todo cast *mInBuffer[i] to the bit resolution
     // Output Process (from NETWORK to JACK)
     // ----------------------------------------------------------------
@@ -546,7 +438,7 @@ void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_b
                 fromBitToSampleConversion(
                     // use interleaved channel layout
                     //&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
-                    &mOutputPacket[(j * mBitResolutionMode * nChansOut)
+                    &mOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
                                    + (i * mBitResolutionMode)],
                     &tmp_sample[j], mBitResolutionMode);
             }
@@ -555,7 +447,7 @@ void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_b
 #endif    // endwhere
 
         // Extract separate channels to send to Jack
-        for (int i = 0; i < nChansOut; i++) {
+        for (int i = 0; i < mOutputChans.size(); i++) {
             //--------
             // This should be faster for 32 bits
             // std::memcpy(mOutBuffer[i], &mOutputPacket[i*mSizeInBytesPerChannel],
@@ -567,7 +459,7 @@ void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_b
                 fromBitToSampleConversion(
                     // use interleaved channel layout
                     //&mOutputPacket[(i*mSizeInBytesPerChannel) + (j*mBitResolutionMode)],
-                    &mAudioOutputPacket[(j * mBitResolutionMode * nChansOut)
+                    &mAudioOutputPacket[(j * mBitResolutionMode * mOutputChans.size())
                                         + (i * mBitResolutionMode)],
                     &tmp_sample[j], mBitResolutionMode);
             }
@@ -578,11 +470,7 @@ void AudioInterface::computeProcessFromNetwork(QVarLengthArray<sample_t*>& out_b
 void AudioInterface::computeProcessToNetwork(QVarLengthArray<sample_t*>& in_buffer,
                                              unsigned int n_frames)
 {
-    int nChansIn               = mInputChans.size();
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
-    }
+    const int nChansIn = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
     // Input Process (from JACK to NETWORK)
     // ----------------------------------------------------------------
     // Concatenate  all the channels from jack to form packet
@@ -760,14 +648,9 @@ void AudioInterface::appendProcessPluginToNetwork(ProcessPlugin* plugin)
         return;
     }
 
-    int nChansIn               = mInputChans.size();
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
-    }
-
-    int nTestChans   = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
-    int nPluginChans = nChansIn - nTestChans;
+    const int nChansIn = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
+    int nTestChans     = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
+    int nPluginChans   = nChansIn - nTestChans;
     assert(nTestChans == 0 || (mAudioTesterP->getSendChannel() == nChansIn - 1));
     if (plugin->getNumInputs() < nPluginChans) {
         std::cerr
@@ -786,11 +669,10 @@ void AudioInterface::appendProcessPluginFromNetwork(ProcessPlugin* plugin)
         return;
     }
 
-    int nChansOut = mOutputChans.size();
-
     int nTestChans   = (mAudioTesterP && mAudioTesterP->getEnabled()) ? 1 : 0;
-    int nPluginChans = nChansOut - nTestChans;
-    assert(nTestChans == 0 || (mAudioTesterP->getSendChannel() == nChansOut - 1));
+    int nPluginChans = mOutputChans.size() - nTestChans;
+    assert(nTestChans == 0
+           || (mAudioTesterP->getSendChannel() == mOutputChans.size() - 1));
     if (plugin->getNumOutputs() > nPluginChans) {
         std::cerr
             << "*** AudioInterface.cpp: appendProcessPluginFromNetwork: ProcessPlugin "
@@ -807,17 +689,9 @@ void AudioInterface::appendProcessPluginToMonitor(ProcessPlugin* plugin)
     if (!plugin) {
         return;
     }
-    int nChansIn  = mInputChans.size();
-    int nChansOut = mOutputChans.size();
-    int nChansMon = std::min<int>(
-        nChansIn, nChansOut);  // Note: Should be 2 when mixing stereo-to-mono
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
-    }
-    if (inputMixMode == MONO) {
-        nChansMon = nChansOut;
-    }
+
+    const int nChansMon = getNumMonChannels();
+
     if (plugin->getNumInputs() > nChansMon) {
         std::cerr
             << "*** AudioInterface.cpp: appendProcessPluginToMonitor: ProcessPlugin "
@@ -843,17 +717,9 @@ void AudioInterface::appendProcessPluginToMonitor(ProcessPlugin* plugin)
 
 void AudioInterface::initPlugins(bool verbose)
 {
-    int nChansIn  = mInputChans.size();
-    int nChansOut = mOutputChans.size();
-    int nChansMon = std::min<int>(
-        nChansIn, nChansOut);  // Note: Should be 2 when mixing stereo-to-mono
-    inputMixModeT inputMixMode = mInputMixMode;
-    if (inputMixMode == MIXTOMONO) {
-        nChansIn = 1;
-    }
-    if (inputMixMode == MONO) {
-        nChansMon = nChansOut;
-    }
+    const int nChansIn  = (MIXTOMONO == mInputMixMode) ? 1 : mInputChans.size();
+    const int nChansOut = mOutputChans.size();
+    const int nChansMon = getNumMonChannels();
     int nPlugins = mProcessPluginsFromNetwork.size() + mProcessPluginsToNetwork.size()
                    + mProcessPluginsToMonitor.size();
     if (nPlugins > 0) {
index 7511ccb818d1d7490b3b7e96ee4561a43bc99810..a9ee255ad1c456c6ae8aaa5571995cfb50ffb419 100644 (file)
@@ -43,8 +43,8 @@
 
 #include "AudioTester.h"
 #include "ProcessPlugin.h"
+#include "WaitFreeFrameBuffer.h"
 #include "jacktrip_types.h"
-//#include "jacktrip_globals.h"
 
 // Forward declarations
 class JackTrip;
@@ -116,6 +116,7 @@ class AudioInterface
 #endif  // endwhere
         AudioInterface::audioBitResolutionT AudioBitResolution = AudioInterface::BIT16,
         bool processWithNetwork = false, JackTrip* jacktrip = nullptr);
+
     /// \brief The class destructor
     virtual ~AudioInterface();
 
@@ -127,24 +128,56 @@ class AudioInterface
      * method to ensure correct inizialization.
      */
     virtual void setup(bool verbose = true);
+
     /// \brief Tell the audio server that we are ready to roll. The
     /// process-callback will start running. This runs on its own thread.
     /// \return 0 on success, otherwise a non-zero error code
     virtual int startProcess() = 0;
+
     /// \brief Stops the process-callback thread
     /// \return 0 on success, otherwise a non-zero error code
     virtual int stopProcess() = 0;
-    /** \brief Process callback. Subclass should call this callback after obtaining the
-    in_buffer and out_buffer pointers.
-    * \param in_buffer Array of input audio samplers for each channel. The user
-    * is responsible to check that each channel has n_frames samplers
-    * \param in_buffer Array of output audio samplers for each channel. The user
-    * is responsible to check that each channel has n_frames samplers
-    */
+
+    /** \brief Broadcast callback. Subclass should call this callback after
+     * obtaining the mon_buffer pointer.
+     *
+     * \param mon_buffer Array of monitor audio samples for each channel. The user
+     * is responsible to check that each channel has n_frames samples
+     * \param n_frames Number of frames (samples per channel) in mon_buffer
+     */
     virtual void broadcastCallback(QVarLengthArray<sample_t*>& mon_buffer,
                                    unsigned int n_frames);
+
+    /** \brief Audio interface callback. Subclass should call this callback after
+     * obtaining the in_buffer and out_buffer pointers (for duplex mode).
+     *
+     * \param in_buffer Array of input audio samples for each channel. The user
+     * is responsible to check that each channel has n_frames samples
+     * \param out_buffer Array of output audio samples for each channel. The user
+     * is responsible to check that each channel has n_frames samples
+     */
     virtual void callback(QVarLengthArray<sample_t*>& in_buffer,
                           QVarLengthArray<sample_t*>& out_buffer, unsigned int n_frames);
+
+    /** \brief Audio input process callback. Subclass should call this callback
+     * after obtaining the in_buffer pointer (for input only).
+     *
+     * \param in_buffer Array of input audio samples for each channel. The user
+     * is responsible to check that each channel has n_frames samples
+     */
+    virtual void audioInputCallback(QVarLengthArray<sample_t*>& in_buffer,
+                                    unsigned int n_frames);
+
+    /** \brief Audio output process callback. Subclass should call this callback
+     * after obtaining the out_buffer pointer (for output only).
+     *
+     * \param out_buffer Array of output audio samples for each channel. The user
+     * is responsible to check that each channel has n_frames samples
+     */
+    virtual void audioOutputCallback(QVarLengthArray<sample_t*>& out_buffer,
+                                     unsigned int n_frames);
+
     /** \brief appendProcessPluginToNetwork(): Append a ProcessPlugin for outgoing audio.
      * The processing order equals order they were appended.
      * This processing is in the JackTrip client before sending to the network.
@@ -153,6 +186,7 @@ class AudioInterface
      * <tt>std::tr1::shared_ptr<ProcessPluginName> loopback(new ProcessPluginName);</tt>
      */
     virtual void appendProcessPluginToNetwork(ProcessPlugin* plugin);
+
     /** \brief appendProcessPluginFromNetwork():
      * Same as appendProcessPluginToNetwork() except that these plugins operate
      * on the audio received from the network (typically from a JackTrip server).
@@ -162,16 +196,20 @@ class AudioInterface
      *               -> JackTrip client -> processPlugin from network -> JACK -> audio
      */
     virtual void appendProcessPluginFromNetwork(ProcessPlugin* plugin);
+
     /** \brief appendProcessPluginToMonitor():
      * Appends plugins used for local monitoring
      */
     virtual void appendProcessPluginToMonitor(ProcessPlugin* plugin);
+
     /** \brief initPlugins():
      * Initialize all ProcessPlugin modules.
      * The audio sampling rate (mSampleRate) must be set at this time.
      */
     void initPlugins(bool verbose = true);
+
     virtual void connectDefaultPorts() = 0;
+
     /** \brief Convert a 32bit number (sample_t) into one of the bit resolution
      * supported (audioBitResolutionT).
      *
@@ -182,6 +220,7 @@ class AudioInterface
     static void fromSampleToBitConversion(
         const sample_t* const input, int8_t* output,
         const AudioInterface::audioBitResolutionT targetBitResolution);
+
     /** \brief Convert a audioBitResolutionT bit resolution number into a
      * 32bit number (sample_t)
      *
@@ -200,12 +239,10 @@ class AudioInterface
     virtual void setInputChannels(QVarLengthArray<int> inputChans)
     {
         mInputChans = inputChans;
-        mNumInChans = inputChans.size();
     }
     virtual void setOutputChannels(QVarLengthArray<int> outputChans)
     {
         mOutputChans = outputChans;
-        mNumOutChans = outputChans.size();
     }
     virtual void setInputMixMode(inputMixModeT mode) { mInputMixMode = mode; }
     virtual void setSampleRate(uint32_t sample_rate) { mSampleRate = sample_rate; }
@@ -235,6 +272,8 @@ class AudioInterface
     virtual int getNumInputChannels() const { return mInputChans.size(); }
     /// \brief Get Number of Output Channels
     virtual int getNumOutputChannels() const { return mOutputChans.size(); }
+    /// \brief Get Number of Monitor Channels
+    virtual int getNumMonChannels() const { return mOutputChans.size(); }
     virtual QVarLengthArray<int> getInputChannels() const { return mInputChans; }
     virtual QVarLengthArray<int> getOutputChannels() const { return mOutputChans; }
     virtual inputMixModeT getInputMixMode() const { return mInputMixMode; }
@@ -282,10 +321,8 @@ class AudioInterface
     QVarLengthArray<sample_t*>
         mNetInBuffer;  ///< Vector of Input buffers/channel read from net
     QVarLengthArray<sample_t*>
-        mAPInBuffer;  ///< Vector of Input buffers/channel for AllPass input
-#endif                // endwhere
-    QVarLengthArray<sample_t*>
-        mInBufCopy;           ///< needed in callback() to modify JACK audio input
+        mAPInBuffer;          ///< Vector of Input buffers/channel for AllPass input
+#endif                        // endwhere
     int mAudioBitResolution;  ///< Bit resolution in audio samples
     AudioInterface::audioBitResolutionT
         mBitResolutionMode;  ///< Bit resolution (audioBitResolutionT) mode
@@ -304,20 +341,19 @@ class AudioInterface
         mInProcessBuffer;  ///< Vector of Input buffers/channel for ProcessPlugin
     QVarLengthArray<sample_t*>
         mOutProcessBuffer;  ///< Vector of Output buffers/channel for ProcessPlugin
-    QVarLengthArray<sample_t*>
-        mMonProcessBuffer;       ///< Vector of Monitor buffers/channel for ProcessPlugin
+    WaitFreeFrameBuffer<64>*
+        mMonitorQueuePtr;        ///< Queue of audio frames from monitor signal
     int8_t* mAudioInputPacket;   ///< Packet containing all the channels to read from the
                                  ///< RingBuffer
     int8_t* mAudioOutputPacket;  ///< Packet containing all the channels to send to the
                                  ///< RingBuffer
     bool mLoopBack;
     bool mProcessWithNetwork;  ///< whether or not to send/receive data via the network
+    bool mMonitorStarted;      ///< True if we have started to consume monitor audio
     AudioTester* mAudioTesterP{nullptr};
 
    protected:
     JackTrip* mJackTrip;          ///< JackTrip Mediator Class pointer
-    int mNumInChans;              ///< Number of Input Channels
-    int mNumOutChans;             ///<  Number of Output Channels
     inputMixModeT mInputMixMode;  ///< Input mixing mode
 
     void setDevicesWarningMsg(warningMessageT msg);
index 349bd957a9f67c36776e1ea7587b26b2feb9b057..f638de23ee51f3645e5f2d2188b07f0ef2f7ae50 100644 (file)
@@ -169,17 +169,17 @@ void JackAudioInterface::setupClient()
     mNumFrames = getBufferSizeInSamples();
 
     // Initialize Buffer array to read and write audio
-    mInBuffer.resize(mNumInChans);
-    mOutBuffer.resize(mNumOutChans);
-    mBroadcastBuffer.resize(mNumOutChans);
+    mInBuffer.resize(getNumInputChannels());
+    mOutBuffer.resize(getNumOutputChannels());
+    mBroadcastBuffer.resize(getNumOutputChannels());
 }
 
 //*******************************************************************************
 void JackAudioInterface::createChannels()
 {
     // Create Input Ports
-    mInPorts.resize(mNumInChans);
-    for (int i = 0; i < mNumInChans; i++) {
+    mInPorts.resize(getNumInputChannels());
+    for (int i = 0; i < getNumInputChannels(); i++) {
         QString inName;
         QTextStream(&inName) << "send_" << i + 1;
         mInPorts[i] =
@@ -188,8 +188,8 @@ void JackAudioInterface::createChannels()
     }
 
     // Create Output Ports
-    mOutPorts.resize(mNumOutChans);
-    for (int i = 0; i < mNumOutChans; i++) {
+    mOutPorts.resize(getNumOutputChannels());
+    for (int i = 0; i < getNumOutputChannels(); i++) {
         QString outName;
         QTextStream(&outName) << "receive_" << i + 1;
         mOutPorts[i] =
@@ -198,8 +198,8 @@ void JackAudioInterface::createChannels()
     }
     // Create Broadcast Ports
     if (mBroadcast) {
-        mBroadcastPorts.resize(mNumOutChans);
-        for (int i = 0; i < mNumOutChans; i++) {
+        mBroadcastPorts.resize(getNumOutputChannels());
+        for (int i = 0; i < getNumOutputChannels(); i++) {
             QString outName;
             QTextStream(&outName) << "broadcast_" << i + 1;
             mBroadcastPorts[i] =
@@ -300,12 +300,12 @@ int JackAudioInterface::processCallback(jack_nframes_t nframes)
 
     // Get input and output buffers from JACK
     //-------------------------------------------------------------------
-    for (int i = 0; i < mNumInChans; i++) {
+    for (int i = 0; i < getNumInputChannels(); i++) {
         // Input Ports are READ ONLY and change as needed (no locks) - make a copy for
         // debugging
         mInBuffer[i] = (sample_t*)jack_port_get_buffer(mInPorts[i], nframes);
     }
-    for (int i = 0; i < mNumOutChans; i++) {
+    for (int i = 0; i < getNumOutputChannels(); i++) {
         // Output Ports are WRITABLE
         mOutBuffer[i] = (sample_t*)jack_port_get_buffer(mOutPorts[i], nframes);
     }
@@ -320,7 +320,7 @@ int JackAudioInterface::processCallback(jack_nframes_t nframes)
     AudioInterface::callback(mInBuffer, mOutBuffer, nframes);
 
     if (mBroadcast) {
-        for (int i = 0; i < mNumOutChans; i++) {
+        for (int i = 0; i < getNumOutputChannels(); i++) {
             // Broadcast Ports are WRITABLE
             mBroadcastBuffer[i] =
                 (sample_t*)jack_port_get_buffer(mBroadcastPorts[i], nframes);
@@ -348,7 +348,7 @@ void JackAudioInterface::connectDefaultPorts()
         cout << "WARNING: Cannot find any physical capture ports" << endl;
     } else {
         // Connect capure ports to jacktrip send
-        for (int i = 0; i < mNumInChans; i++) {
+        for (int i = 0; i < getNumInputChannels(); i++) {
             // Check that we don't run out of capture ports
             if (ports[i] != NULL) {
                 jack_connect(mClient, ports[i], jack_port_name(mInPorts[i]));
@@ -366,7 +366,7 @@ void JackAudioInterface::connectDefaultPorts()
         cout << "WARNING: Cannot find any physical playback ports" << endl;
     } else {
         // Connect playback ports to jacktrip receive
-        for (int i = 0; i < mNumOutChans; i++) {
+        for (int i = 0; i < getNumOutputChannels(); i++) {
             // Check that we don't run out of capture ports
             if (ports[i] != NULL) {
                 jack_connect(mClient, jack_port_name(mOutPorts[i]), ports[i]);
index 366cebcbcec0e3c1d76613427b1b48364e73f1a8..b6fc6c080aad4bcc2206ab9b1163c04ac38fb6fb 100644 (file)
@@ -99,11 +99,6 @@ class JackAudioInterface : public AudioInterface
     /// \brief Connect the default ports, capture to sends, and receives to playback
     void connectDefaultPorts() override;
 
-    /// \brief Get Number of Input Channels
-    virtual int getNumInputChannels() const override { return mNumInChans; }
-    /// \brief Get Number of Output Channels
-    virtual int getNumOutputChannels() const override { return mNumOutChans; }
-
     //--------------SETTERS---------------------------------------------
     /// \brief Set Client Name to something different that the default (JackTrip)
     virtual void setClientName(const QString& ClientName) override
index 9b83c287b9296e734cea6d52b1479ba6a997380b..efb9c70f9c2f51aed03b5706d91d85691be2ab1f 100644 (file)
@@ -91,19 +91,26 @@ using std::endl;
 using std::setw;
 
 // constants...
-constexpr int HIST        = 4;     // for mono at FPP 16-128, see below for > mono, > 128
-constexpr int NumSlotsMax = 4096;  // mNumSlots looped for recent arrivals
-constexpr double DefaultAutoHeadroom =
-    3.0;                           // msec padding for auto adjusting mMsecTolerance
-constexpr double AutoMax = 250.0;  // msec bounds on insane IPI, like ethernet unplugged
-constexpr double AutoInitDur = 6000.0;  // kick in auto after this many msec
+constexpr int HIST        = 4;      // for mono at FPP 16-128, see below for > mono, > 128
+constexpr int NumSlotsMax = 4096;   // mNumSlots looped for recent arrivals
+constexpr double AutoMax  = 250.0;  // msec bounds on insane IPI, like ethernet unplugged
+constexpr double AutoInitDur = 3000.0;  // kick in auto after this many msec
 constexpr double AutoInitValFactor =
     0.5;  // scale for initial mMsecTolerance during init phase if unspecified
 constexpr double MaxWaitTime = 30;  // msec
 
 // tweak
-constexpr int WindowDivisor = 8;     // for faster auto tracking
-constexpr int MaxFPP        = 1024;  // tested up to this FPP
+constexpr int WindowDivisor   = 8;     // for faster auto tracking
+constexpr int MaxFPP          = 1024;  // tested up to this FPP
+constexpr int MaxAutoHeadroom = 5;     // maximum auto headroom in milliseconds
+constexpr double AutoHeadroomGlitchTolerance =
+    0.007;  // Acceptable rate of glitches before auto headroom is increased (0.7%)
+constexpr double AutoHistoryWindow =
+    60;  // rolling window of time (in seconds) over which auto tolerance roughly adjusts
+constexpr double AutoSmoothingFactor =
+    1.0
+    / (WindowDivisor * AutoHistoryWindow);  // EWMA smoothing factor for auto tolerance
+
 //*******************************************************************************
 Regulator::Regulator(int rcvChannels, int bit_res, int FPP, int qLen, int bqLen,
                      int sample_rate)
@@ -116,6 +123,9 @@ Regulator::Regulator(int rcvChannels, int bit_res, int FPP, int qLen, int bqLen,
     , pushStat(NULL)
     , pullStat(NULL)
     , mAuto(false)
+    , mSkipAutoHeadroom(true)
+    , mLastGlitches(0)
+    , mCurrentHeadroom(0)
     , mUseWorkerThread(false)
     , m_b_BroadcastQueueLength(bqLen)
     , mRegulatorThreadPtr(NULL)
@@ -204,7 +214,7 @@ Regulator::Regulator(int rcvChannels, int bit_res, int FPP, int qLen, int bqLen,
     mFPPratioIsSet       = false;
     mBytesPeerPacket     = mBytes;
     mPeerFPP             = mFPP;  // use local until first packet arrives
-    mAutoHeadroom        = DefaultAutoHeadroom;
+    mAutoHeadroom        = 3.0;
     mFPPdurMsec          = 1000.0 * mFPP / mSampleRate;
     changeGlobal_2(NumSlotsMax);  // need hg if running GUI
     if (m_b_BroadcastQueueLength) {
@@ -280,6 +290,49 @@ Regulator::~Regulator()
         delete m_b_BroadcastRingBuffer;
 }
 
+//*******************************************************************************
+void Regulator::updateTolerance()
+{
+    // pushes happen when we have new packets received from peer
+    // pulls happen when our audio interface triggers a callback
+    const double pushStatTol = pushStat->calcAuto();
+    const double pullStatTol = pullStat->calcAuto();
+    if (mAutoHeadroom < 0) {
+        // auto headroom calculation: use value calculated by pullStats
+        // because that is where it counts glitches in the incoming peer stream
+        const int glitchesAllowed =
+            static_cast<int>(AutoHeadroomGlitchTolerance * mSampleRate / mPeerFPP);
+        const int totalGlitches = pullStat->plcUnderruns + pullStat->plcOverruns;
+        const int newGlitches   = totalGlitches - mLastGlitches;
+        mLastGlitches           = totalGlitches;
+        // require two consecutive periods of glitches exceeding allowed threshold
+        if (newGlitches > glitchesAllowed && mCurrentHeadroom < MaxAutoHeadroom) {
+            if (mSkipAutoHeadroom) {
+                mSkipAutoHeadroom = false;
+            } else {
+                mSkipAutoHeadroom = true;
+                ++mCurrentHeadroom;
+                qDebug() << "PLC" << newGlitches << "glitches"
+                         << ">" << glitchesAllowed << "allowed: Increasing headroom to "
+                         << mCurrentHeadroom;
+            }
+        } else {
+            mSkipAutoHeadroom = true;
+        }
+    } else {
+        mCurrentHeadroom = mAutoHeadroom;
+    }
+    double tmp = std::max<double>(pushStatTol + mCurrentHeadroom, pullStatTol);
+    if (tmp > AutoMax)
+        tmp = AutoMax;
+    if (tmp < mFPPdurMsec)
+        tmp = mFPPdurMsec;
+    if (tmp < mPeerFPPdurMsec)
+        tmp = mPeerFPPdurMsec;
+    mMsecTolerance = tmp;
+}
+
+//*******************************************************************************
 void Regulator::setFPPratio()
 {
     if (mPeerFPP != mFPP) {
@@ -306,13 +359,18 @@ void Regulator::shimFPP(const int8_t* buf, int len, int seq_num)
                 mAuto = true;
                 // default is -500 from bufstrategy 1 autoq mode
                 // use mMsecTolerance to set headroom
-                mAutoHeadroom =
-                    (mMsecTolerance == -500.0) ? DefaultAutoHeadroom : -mMsecTolerance;
-                qDebug() << "PLC is in auto mode and has been set with" << mAutoHeadroom
-                         << "ms headroom";
-                if (mAutoHeadroom > 50.0)
-                    qDebug() << "That's a very large value and should be less than, "
-                                "for example, 50ms";
+                if (mMsecTolerance == -500.0) {
+                    mAutoHeadroom = -1;
+                    qDebug()
+                        << "PLC is in auto mode and has been set with variable headroom";
+                } else {
+                    mAutoHeadroom = -mMsecTolerance;
+                    qDebug() << "PLC is in auto mode and has been set with"
+                             << mAutoHeadroom << "ms headroom";
+                    if (mAutoHeadroom > 50.0)
+                        qDebug() << "That's a very large value and should be less than, "
+                                    "for example, 50ms";
+                }
                 // found an interesting relationship between mPeerFPP and initial
                 // mMsecTolerance mPeerFPP*0.5 is pretty good though that's an oddball
                 // conversion of bufsize directly to msec
@@ -320,9 +378,8 @@ void Regulator::shimFPP(const int8_t* buf, int len, int seq_num)
             };
             setFPPratio();
             // number of stats tick calls per sec depends on FPP
-            int maxFPP = (mPeerFPP > mFPP) ? mPeerFPP : mFPP;
-            pushStat   = new StdDev(1, &mIncomingTimer,
-                                    (int)(floor(mSampleRate / (double)maxFPP)));
+            pushStat = new StdDev(1, &mIncomingTimer,
+                                  (int)(floor(mSampleRate / (double)mPeerFPP)));
             pullStat =
                 new StdDev(2, &mIncomingTimer, (int)(floor(mSampleRate / (double)mFPP)));
             mFPPratioIsSet = true;
@@ -343,13 +400,11 @@ void Regulator::shimFPP(const int8_t* buf, int len, int seq_num)
                 seq_num++;
             }
         }
-        pushStat->tick();
-        if (mAuto && (pushStat->lastTime > AutoInitDur)) {
-            // use max to accomodate for bad clocks in audio interfaces that
-            // cause a wide range of callback intervals (like realtek at 11ms)
-            mMsecTolerance = std::max<double>(
-                pushStat->calcAuto(mAutoHeadroom, mFPPdurMsec, mPeerFPPdurMsec),
-                pullStat->calcAuto(mAutoHeadroom, mFPPdurMsec, mPeerFPPdurMsec));
+        bool pushStatsUpdated = pushStat->tick();
+        if (mAuto && pushStatsUpdated && (pushStat->lastTime > AutoInitDur)
+            && pushStat->longTermCnt % WindowDivisor == 0) {
+            // after AutoInitDur: update auto tolerance once per second
+            updateTolerance();
         }
     }
 };
@@ -411,7 +466,7 @@ void Regulator::pullPacket()
             int next = lastSeqNumIn - i;
             if (next < 0)
                 next += mNumSlots;
-            if (mFPPratioNumerator) {
+            if (mFPPratioNumerator > 1) {
                 // time for assembly has passed; reset for next time
                 mAssemblyCounts[next] = 0;
             }
@@ -729,7 +784,6 @@ StdDev::StdDev(int id, QElapsedTimer* timer, int w) : mId(id), mTimer(timer), wi
     lastMax           = 0.0;
     longTermMax       = 0.0;
     longTermMaxAcc    = 0.0;
-    longTermMean      = 0.0;
     lastTime          = 0.0;
     lastPLCdspElapsed = 0.0;
     lastPlcOverruns   = 0;
@@ -748,32 +802,34 @@ void StdDev::reset()
     max  = -999999.0;
 };
 
-double StdDev::calcAuto(double autoHeadroom, double localFPPdur, double peerFPPdur)
+double StdDev::calcAuto()
 {
     //    qDebug() << longTermStdDev << longTermMax << AutoMax << window <<
     //    longTermCnt;
     if ((longTermStdDev == 0.0) || (longTermMax == 0.0))
         return AutoMax;
     double tmp = longTermStdDev + ((longTermMax > AutoMax) ? AutoMax : longTermMax);
-    if (tmp > AutoMax)
-        tmp = AutoMax;
-    if (tmp < localFPPdur)
-        tmp = localFPPdur;
-    if (tmp < peerFPPdur)
-        tmp = peerFPPdur;
-    tmp += autoHeadroom;
     return tmp;
 };
 
-void StdDev::tick()
+double StdDev::smooth(double avg, double current)
+{
+    // use exponential weighted moving average (EWMA) for long term calculations
+    // See https://en.wikipedia.org/wiki/Exponential_smoothing
+    return avg + AutoSmoothingFactor * (current - avg);
+}
+
+bool StdDev::tick()
 {
     double now       = (double)mTimer->nsecsElapsed() / 1000000.0;
     double msElapsed = now - lastTime;
     lastTime         = now;
+
     // discard measurements that exceed the max wait time
     // this prevents temporary outages from skewing jitter metrics
     if (msElapsed > MaxWaitTime)
-        return;
+        return false;
+
     if (ctr != window) {
         data[ctr] = msElapsed;
         if (msElapsed < min)
@@ -790,50 +846,57 @@ void StdDev::tick()
             std::cout << setw(10) << msElapsed << " " << mId << endl;
         }
         */
-    } else {
-        // calculate mean and standard deviation
-        mean       = (double)acc / (double)window;
-        double var = 0.0;
-        for (int i = 0; i < window; i++) {
-            double tmp = data[i] - mean;
-            var += (tmp * tmp);
+        return false;
+    }
+
+    // calculate mean and standard deviation
+    mean       = (double)acc / (double)window;
+    double var = 0.0;
+    for (int i = 0; i < window; i++) {
+        double tmp = data[i] - mean;
+        var += (tmp * tmp);
+    }
+    var /= (double)window;
+    double stdDevTmp = sqrt(var);
+
+    if (longTermCnt <= 3) {
+        if (longTermCnt == 0 && gVerboseFlag) {
+            cout << "printing directly from Regulator->stdDev->tick:\n (mean / min / "
+                    "max / "
+                    "stdDev / longTermMax / longTermStdDev) \n";
         }
-        var /= (double)window;
-        double stdDevTmp = sqrt(var);
-
-        if (longTermCnt <= 1) {
-            if (longTermCnt == 0 && gVerboseFlag) {
-                cout << "printing directly from Regulator->stdDev->tick:\n (mean / min / "
-                        "max / "
-                        "stdDev / longTermMean / longTermMax / longTermStdDev) \n";
-            }
-            // ignore first stats because they will be really unreliable
-            longTermMax       = max;
-            longTermMaxAcc    = max;
-            longTermMean      = mean;
-            longTermStdDev    = stdDevTmp;
-            longTermStdDevAcc = stdDevTmp;
+        // ignore first few stats because they are unreliable
+        longTermMax       = max;
+        longTermMaxAcc    = max;
+        longTermStdDev    = stdDevTmp;
+        longTermStdDevAcc = stdDevTmp;
+    } else {
+        longTermStdDevAcc += stdDevTmp;
+        longTermMaxAcc += max;
+        if (longTermCnt <= (WindowDivisor * AutoHistoryWindow)) {
+            // use simple average for startup to establish baseline
+            longTermStdDev = longTermStdDevAcc / (longTermCnt - 3);
+            longTermMax    = longTermMaxAcc / (longTermCnt - 3);
         } else {
-            longTermStdDevAcc += stdDevTmp;
-            longTermMaxAcc += max;
-            longTermStdDev = longTermStdDevAcc / (double)longTermCnt;
-            longTermMax    = longTermMaxAcc / (double)longTermCnt;
-            longTermMean   = longTermMean / (double)longTermCnt;
-        }
-
-        if (gVerboseFlag) {
-            cout << setw(10) << mean << setw(10) << min << setw(10) << max << setw(10)
-                 << stdDevTmp << setw(10) << longTermMean << setw(10) << longTermMax
-                 << setw(10) << longTermStdDev << " " << mId << endl;
+            // use EWMA after startup to allow for adjustments
+            longTermStdDev = smooth(longTermStdDev, stdDevTmp);
+            longTermMax    = smooth(longTermMax, max);
         }
+    }
 
-        longTermCnt++;
-        lastMean   = mean;
-        lastMin    = min;
-        lastMax    = max;
-        lastStdDev = stdDevTmp;
-        reset();
+    if (gVerboseFlag) {
+        cout << setw(10) << mean << setw(10) << min << setw(10) << max << setw(10)
+             << stdDevTmp << setw(10) << longTermMax << setw(10) << longTermStdDev << " "
+             << mId << endl;
     }
+
+    longTermCnt++;
+    lastMean   = mean;
+    lastMin    = min;
+    lastMax    = max;
+    lastStdDev = stdDevTmp;
+    reset();
+    return true;
 }
 
 void Regulator::readSlotNonBlocking(int8_t* ptrToReadSlot)
index 938b0322c48cb77d30c797f247303134f992cc4e..208acbaf6f395be2a8deb0dd63600553e3d1b663 100644 (file)
@@ -96,8 +96,8 @@ class StdDev
 {
    public:
     StdDev(int id, QElapsedTimer* timer, int w);
-    void tick();
-    double calcAuto(double autoHeadroom, double localFPPdur, double peerFPPdur);
+    bool tick();  // returns true if stats were updated
+    double calcAuto();
     int mId;
     int plcOverruns;
     int plcUnderruns;
@@ -113,9 +113,10 @@ class StdDev
     double longTermStdDevAcc;
     double longTermMax;
     double longTermMaxAcc;
-    double longTermMean;
+    int longTermCnt;
 
    private:
+    double smooth(double avg, double current);
     void reset();
     QElapsedTimer* mTimer;
     std::vector<double> data;
@@ -125,7 +126,6 @@ class StdDev
     double min;
     double max;
     int ctr;
-    int longTermCnt;
 };
 
 class Regulator : public RingBuffer
@@ -188,10 +188,12 @@ class Regulator : public RingBuffer
     void pushPacket(const int8_t* buf, int seq_num);
     void assemblePacket(const int8_t* buf, int peer_seq_num);
     void pullPacket();
+    void updateTolerance();
     void setFPPratio();
-    bool mFPPratioIsSet;
     void processPacket(bool glitch);
     void processChannel(int ch, bool glitch, int packetCnt, bool lastWasGlitch);
+
+    bool mFPPratioIsSet;
     int mNumChannels;
     int mAudioBitRes;
     int mFPP;
@@ -228,6 +230,9 @@ class Regulator : public RingBuffer
     int mFPPratioNumerator;
     int mFPPratioDenominator;
     bool mAuto;
+    bool mSkipAutoHeadroom;
+    int mLastGlitches;
+    double mCurrentHeadroom;
     double mAutoHeadroom;
     double mFPPdurMsec;
     double mPeerFPPdurMsec;
index 890809d89231ec37557bd4ed3bc8bb9ef154d3ad..41268f8d6fe0e9423672411a2c86c05da32434dc 100644 (file)
@@ -38,6 +38,7 @@
 #include "RtAudioInterface.h"
 
 #include <QString>
+#include <QTextStream>
 #include <cstdlib>
 
 #include "JackTrip.h"
@@ -82,6 +83,16 @@ void RtAudioDevice::printVerbose() const
 #endif
 }
 
+//*******************************************************************************
+bool RtAudioDevice::checkSampleRate(unsigned int srate) const
+{
+    for (unsigned int i = 0; i < this->sampleRates.size(); i++) {
+        if (this->sampleRates[i] == srate)
+            return true;
+    }
+    return false;
+}
+
 //*******************************************************************************
 RtAudioDevice& RtAudioDevice::operator=(const RtAudio::DeviceInfo& info)
 {
@@ -164,8 +175,7 @@ void RtAudioInterface::setup(bool verbose)
     // Locate the selected input audio device
     auto in_name = getInputDevice();
     if (in_name.empty()) {
-        mRtAudio.reset(new RtAudio);
-        long default_device_id = getDefaultDevice(*mRtAudio, true);
+        long default_device_id = getDefaultDevice(true);
         if (!getDeviceInfoFromId(default_device_id, in_device, true))
             throw std::runtime_error("default input device not found");
         cout << "Selected default INPUT device" << endl;
@@ -174,14 +184,13 @@ void RtAudioInterface::setup(bool verbose)
             throw std::runtime_error("Requested input device \"" + in_name
                                      + "\" not found.");
         }
-        mRtAudio.reset(new RtAudio(in_device.api));
         cout << "Selected INPUT device " << in_name << endl;
     }
 
     // Locate the selected output audio device
     auto out_name = getOutputDevice();
     if (out_name.empty()) {
-        long default_device_id = getDefaultDevice(*mRtAudio, false);
+        long default_device_id = getDefaultDevice(false);
         if (!getDeviceInfoFromId(default_device_id, out_device, false))
             throw std::runtime_error("default output device not found");
         cout << "Selected default OUTPUT device" << endl;
@@ -193,6 +202,16 @@ void RtAudioInterface::setup(bool verbose)
         cout << "Selected OUTPUT device " << out_name << endl;
     }
 
+    if (in_device.ID == out_device.ID) {
+        mRtAudioInput.reset(new RtAudio(in_device.api));
+        mRtAudioOutput.reset();
+        mDuplexMode = true;
+    } else {
+        mRtAudioInput.reset(new RtAudio(in_device.api));
+        mRtAudioOutput.reset(new RtAudio(out_device.api));
+        mDuplexMode = false;
+    }
+
     if (in_chans_base + in_chans_num > in_device.inputChannels) {
         in_chans_base = 0;
         in_chans_num  = 2;
@@ -210,13 +229,33 @@ void RtAudioInterface::setup(bool verbose)
     }
 
     if (verbose) {
-        cout << "INPUT DEVICE:" << endl;
+        if (mDuplexMode) {
+            cout << "DUPLEX DEVICE:" << endl;
+        } else {
+            cout << "INPUT DEVICE:" << endl;
+        }
         in_device.printVerbose();
         cout << gPrintSeparator << endl;
+        if (!mDuplexMode) {
+            cout << "OUTPUT DEVICE:" << endl;
+            out_device.printVerbose();
+            cout << gPrintSeparator << endl;
+        }
+    }
 
-        cout << "OUTPUT DEVICE:" << endl;
-        out_device.printVerbose();
-        cout << gPrintSeparator << endl;
+    if (!in_device.checkSampleRate(getSampleRate())) {
+        QString errorMsg;
+        QTextStream(&errorMsg) << "Input device \"" << QString::fromStdString(in_name)
+                               << "\" does not support sample rate of "
+                               << getSampleRate();
+        throw std::runtime_error(errorMsg.toStdString());
+    }
+    if (!out_device.checkSampleRate(getSampleRate())) {
+        QString errorMsg;
+        QTextStream(&errorMsg) << "Output device \"" << QString::fromStdString(out_name)
+                               << "\" does not support sample rate of "
+                               << getSampleRate();
+        throw std::runtime_error(errorMsg.toStdString());
     }
 
     if (in_device.api == out_device.api) {
@@ -299,9 +338,32 @@ void RtAudioInterface::setup(bool verbose)
         errorCallback(type, errorText, nullptr);
     };
     try {
-        mRtAudio->openStream(&out_params, &in_params, RTAUDIO_FLOAT32, sampleRate,
-                             &bufferFrames, &RtAudioInterface::wrapperRtAudioCallback,
-                             this, &options, errorFunc);
+        if (mDuplexMode) {
+            mRtAudioInput->openStream(
+                &out_params, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+        } else {
+            mRtAudioInput->openStream(
+                nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+            const unsigned int inputBufferFrames = bufferFrames;
+            mRtAudioOutput->openStream(
+                &out_params, nullptr, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+            if (inputBufferFrames != bufferFrames) {
+                // output device doesn't support the same buffer size
+                // try to reopen the input device with new size
+                const unsigned int outputBufferFrames = bufferFrames;
+                mRtAudioInput->closeStream();
+                mRtAudioInput->openStream(
+                    nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                    &RtAudioInterface::wrapperRtAudioCallback, this, &options, errorFunc);
+                if (outputBufferFrames != bufferFrames) {
+                    // just give up if this still doesn't work
+                    errorText = "The two devices selected are incompatible";
+                }
+            }
+        }
     } catch (RtAudioError& e) {
         errorText = e.getMessage();
     }
@@ -311,19 +373,52 @@ void RtAudioInterface::setup(bool verbose)
                                             const std::string& errorText) {
         errorCallback(type, errorText, this);
     };
-    mRtAudio->setErrorCallback(errorFunc);
-    if (RTAUDIO_NO_ERROR
-        != mRtAudio->openStream(&out_params, &in_params, RTAUDIO_FLOAT32, sampleRate,
-                                &bufferFrames, &RtAudioInterface::wrapperRtAudioCallback,
-                                this, &options)) {
-        errorText = mRtAudio->getErrorText();
+    mRtAudioInput->setErrorCallback(errorFunc);
+    if (mDuplexMode) {
+        if (RTAUDIO_NO_ERROR
+            != mRtAudioInput->openStream(
+                &out_params, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+            errorText = mRtAudioInput->getErrorText();
+        }
+    } else {
+        mRtAudioOutput->setErrorCallback(errorFunc);
+        if (RTAUDIO_NO_ERROR
+            != mRtAudioInput->openStream(
+                nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+            errorText = mRtAudioInput->getErrorText();
+        } else {
+            const unsigned int inputBufferFrames = bufferFrames;
+            if (RTAUDIO_NO_ERROR
+                != mRtAudioOutput->openStream(
+                    &out_params, nullptr, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                    &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+                errorText = mRtAudioOutput->getErrorText();
+            } else if (inputBufferFrames != bufferFrames) {
+                // output device doesn't support the same buffer size
+                // try to reopen the input device with new size
+                const unsigned int outputBufferFrames = bufferFrames;
+                mRtAudioInput->closeStream();
+                if (RTAUDIO_NO_ERROR
+                    != mRtAudioInput->openStream(
+                        nullptr, &in_params, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
+                        &RtAudioInterface::wrapperRtAudioCallback, this, &options)) {
+                    errorText = mRtAudioInput->getErrorText();
+                } else if (outputBufferFrames != bufferFrames) {
+                    // just give up if this still doesn't work
+                    errorText = "The two devices selected are incompatible";
+                }
+            }
+        }
     }
 #endif
 
     if (!errorText.empty()) {
         std::cerr << "RtAudioInterface failed to open stream: " << errorText << '\n'
                   << std::endl;
-        mRtAudio.reset();
+        mRtAudioInput.reset();
+        mRtAudioOutput.reset();
         throw std::runtime_error(errorText);
     }
 
@@ -394,8 +489,10 @@ void RtAudioInterface::getDeviceIds(RtAudio& rtaudio, std::vector<unsigned int>&
 }
 
 //*******************************************************************************
-long RtAudioInterface::getDefaultDevice(RtAudio& rtaudio, bool isInput)
+long RtAudioInterface::getDefaultDevice(bool isInput)
 {
+    RtAudio rtaudio;
+
 #if RTAUDIO_VERSION_MAJOR < 6
     if (rtaudio.getCurrentApi() == RtAudio::LINUX_PULSE) {
         return getDefaultDeviceForLinuxPulseAudio(isInput);
@@ -448,32 +545,39 @@ int RtAudioInterface::RtAudioCallback(void* outputBuffer, void* inputBuffer,
                                       unsigned int nFrames, double /*streamTime*/,
                                       RtAudioStreamStatus /*status*/)
 {
-    // TODO: this function may need more changes. As-is I'm not sure this will work
-
-    sample_t* inputBuffer_sample  = NULL;
-    sample_t* outputBuffer_sample = NULL;
+    sample_t* inputBuffer_sample  = static_cast<sample_t*>(inputBuffer);
+    sample_t* outputBuffer_sample = static_cast<sample_t*>(outputBuffer);
+    int in_chans_num              = getNumInputChannels();
 
-    inputBuffer_sample  = (sample_t*)inputBuffer;
-    outputBuffer_sample = (sample_t*)outputBuffer;
+    if (mDuplexMode) {
+        if (inputBuffer_sample == NULL || outputBuffer_sample == NULL) {
+            return 0;
+        }
+    } else if (inputBuffer_sample == NULL && outputBuffer_sample == NULL) {
+        return 0;
+    }
 
-    int in_chans_num = getNumInputChannels();
-    if (inputBuffer_sample != NULL && outputBuffer_sample != NULL) {
-        // Get input and output buffers
-        //-------------------------------------------------------------------
+    // process input before output to minimize monitor latency on duplex devices
+    if (inputBuffer_sample != NULL) {
+        // copy samples to input buffer
         for (int i = 0; i < mInBuffer.size(); i++) {
             // Input Ports are READ ONLY
             mInBuffer[i] = inputBuffer_sample + (nFrames * i);
         }
+        if (in_chans_num == 2 && mInBuffer.size() == in_chans_num
+            && mInputMixMode == AudioInterface::MIXTOMONO) {
+            mStereoToMonoMixerPtr->compute(nFrames, mInBuffer.data(), mInBuffer.data());
+        }
+        AudioInterface::audioInputCallback(mInBuffer, nFrames);
+    }
 
+    if (outputBuffer_sample != NULL) {
+        // copy samples to output buffer
         for (int i = 0; i < mOutBuffer.size(); i++) {
             // Output Ports are WRITABLE
             mOutBuffer[i] = outputBuffer_sample + (nFrames * i);
         }
-        if (in_chans_num == 2 && mInBuffer.size() == in_chans_num
-            && mInputMixMode == AudioInterface::MIXTOMONO) {
-            mStereoToMonoMixerPtr->compute(nFrames, mInBuffer.data(), mInBuffer.data());
-        }
-        AudioInterface::callback(mInBuffer, mOutBuffer, nFrames);
+        AudioInterface::audioOutputCallback(mOutBuffer, nFrames);
     }
 
     return 0;
@@ -515,27 +619,35 @@ void RtAudioInterface::errorCallback(RtAudioErrorType errorType,
 //*******************************************************************************
 int RtAudioInterface::startProcess()
 {
-    if (mRtAudio.isNull())
+    if (mRtAudioInput.isNull())
+        return 0;
+    if (!mDuplexMode && mRtAudioOutput.isNull())
         return 0;
 
     std::string errorText;
 
 #if RTAUDIO_VERSION_MAJOR < 6
     try {
-        mRtAudio->startStream();
+        mRtAudioInput->startStream();
+        if (!mDuplexMode) {
+            mRtAudioOutput->startStream();
+        }
     } catch (RtAudioError& e) {
         errorText = e.getMessage();
     }
 #else
-    if (RTAUDIO_NO_ERROR != mRtAudio->startStream()) {
-        errorText = mRtAudio->getErrorText();
+    if (RTAUDIO_NO_ERROR != mRtAudioInput->startStream()) {
+        errorText = mRtAudioInput->getErrorText();
+    } else if (!mDuplexMode && RTAUDIO_NO_ERROR != mRtAudioOutput->startStream()) {
+        errorText = mRtAudioOutput->getErrorText();
     }
 #endif
 
     if (!errorText.empty()) {
         std::cerr << "RtAudioInterface failed to start stream: " << errorText
                   << std::endl;
-        mRtAudio.reset();
+        mRtAudioInput.reset();
+        mRtAudioOutput.reset();
         return (-1);
     }
 
@@ -545,28 +657,39 @@ int RtAudioInterface::startProcess()
 //*******************************************************************************
 int RtAudioInterface::stopProcess()
 {
-    if (mRtAudio.isNull())
+    if (mRtAudioInput.isNull())
+        return 0;
+    if (!mDuplexMode && mRtAudioOutput.isNull())
         return 0;
 
     std::string errorText;
 
 #if RTAUDIO_VERSION_MAJOR < 6
     try {
-        mRtAudio->closeStream();
+        mRtAudioInput->closeStream();
         // this causes it to crash for some reason
         // mRtAudio->abortStream();
+        if (!mDuplexMode) {
+            mRtAudioOutput->closeStream();
+        }
     } catch (RtAudioError& e) {
         errorText = e.getMessage();
     }
 #else
-    if (RTAUDIO_NO_ERROR != mRtAudio->abortStream()) {
-        errorText = mRtAudio->getErrorText();
+    if (RTAUDIO_NO_ERROR != mRtAudioInput->abortStream()) {
+        errorText = mRtAudioInput->getErrorText();
+    } else if (!mDuplexMode && RTAUDIO_NO_ERROR != mRtAudioOutput->abortStream()) {
+        errorText = mRtAudioOutput->getErrorText();
     } else {
-        mRtAudio->closeStream();
+        mRtAudioInput->closeStream();
+        if (!mDuplexMode) {
+            mRtAudioOutput->closeStream();
+        }
     }
 #endif
 
-    mRtAudio.reset();
+    mRtAudioInput.reset();
+    mRtAudioOutput.reset();
 
     if (!errorText.empty()) {
         std::cerr << errorText << '\n' << std::endl;
index c4822f46d120be57419f90ce4d97901d78c0bb28..b58125c1c7858018501e74265639925dfeac502d 100644 (file)
@@ -64,6 +64,7 @@ class RtAudioDevice : public RtAudio::DeviceInfo
     RtAudio::Api api;
     void print() const;
     void printVerbose() const;
+    bool checkSampleRate(unsigned int srate) const;
     RtAudioDevice& operator=(const RtAudio::DeviceInfo& info);
 };
 
@@ -139,7 +140,7 @@ class RtAudioInterface : public AudioInterface
     // updates device and returns true if found
     bool getDeviceInfoFromId(const long deviceId, RtAudioDevice& device,
                              bool isInput) const;
-    long getDefaultDevice(RtAudio& rtaudio, bool isInput);
+    long getDefaultDevice(bool isInput);
     long getDefaultDeviceForLinuxPulseAudio(bool isInput);
 
     QVarLengthArray<float*>
@@ -148,8 +149,11 @@ class RtAudioInterface : public AudioInterface
         mOutBuffer;  ///< Vector of Output buffer/channel to write to JACK
     QVector<RtAudioDevice>
         mDevices;  ///< Vector of audio interfaces available via RTAudio
-    QSharedPointer<RtAudio>
-        mRtAudio;  ///< RtAudio class if the input and output device are the same
+    QSharedPointer<RtAudio> mRtAudioInput;   ///< RtAudio class for the input device
+    QSharedPointer<RtAudio> mRtAudioOutput;  ///< RtAudio class for the output device
+                                             ///< (null if using duplex mode)
+    bool mDuplexMode;  ///< true if using duplex stream mode (input device == output
+                       ///< device)
     QScopedPointer<StereoToMono> mStereoToMonoMixerPtr;
 };
 
index a189a8e6f24fecf589a1c90675e51c3f286dea54..99900cf98af82ede1d040b9160e3fbb59c2c1b52 100644 (file)
@@ -62,7 +62,9 @@ class Settings : public QObject
    public:
     Settings(bool guiEnabled = false, QObject* parent = nullptr)
         : QObject(parent)
-#ifndef NO_GUI
+#ifdef NO_GUI
+        , mGuiEnabled(false)
+#else
         , mGuiEnabled(guiEnabled)
 #endif
         , mAudioTester(new AudioTester)
index 469f39ef349ad7c6ee04050b503d1ca117e8f45e..0b56065f996f982e3de4de59065ecb0a5aa54209 100644 (file)
@@ -6,6 +6,10 @@ Rectangle {
     height: parent.height
     color: backgroundColour
 
+    property bool connected: false
+    property bool showMeters: true
+    property bool showTestAudio: true
+
     property int fontBig: 20
     property int fontMedium: 13
     property int fontSmall: 11
@@ -112,6 +116,7 @@ Rectangle {
                 x: 0; y: 0
                 text: "Output Device"
                 font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
+                bottomPadding: 10 * virtualstudio.uiScale
                 color: textColour
             }
 
@@ -127,7 +132,7 @@ Rectangle {
             AppIcon {
                 id: headphonesIcon
                 anchors.left: outputLabel.left
-                anchors.verticalCenter: outputDeviceMeters.verticalCenter
+                anchors.top: outputLabel.bottom
                 width: 28 * virtualstudio.uiScale
                 height: 28 * virtualstudio.uiScale
                 icon.source: "headphones.svg"
@@ -168,8 +173,12 @@ Rectangle {
                                         audio.inputDevice = modelData.text
                                     }
                                 }
-                                audio.validateDevices()
-                                audio.restartAudio()
+                                if (connected) {
+                                    virtualstudio.triggerReconnect(false);
+                                } else {
+                                    audio.validateDevices()
+                                    audio.restartAudio()
+                                }
                             }
                         }
                     }
@@ -191,8 +200,9 @@ Rectangle {
                 anchors.top: outputCombo.bottom
                 anchors.topMargin: 16 * virtualstudio.uiScale
                 height: 24 * virtualstudio.uiScale
-                model: audio.outputMeterLevels
+                model: showMeters ? audio.outputMeterLevels : [0, 0]
                 clipped: audio.outputClipped
+                visible: showMeters
                 enabled: audio.audioReady && !Boolean(audio.devicesError)
             }
 
@@ -208,13 +218,14 @@ Rectangle {
                 tooltipText: "How loudly you hear other participants"
                 showLabel: false
                 sliderEnabled: true
+                visible: showMeters
             }
 
             Text {
                 id: outputChannelsLabel
                 anchors.left: outputCombo.left
                 anchors.right: outputCombo.horizontalCenter
-                anchors.top: outputSlider.bottom
+                anchors.top: showMeters ? outputSlider.bottom : outputCombo.bottom
                 anchors.topMargin: 12 * virtualstudio.uiScale
                 textFormat: Text.RichText
                 text: "Output Channel(s)"
@@ -247,8 +258,12 @@ Rectangle {
                             outputChannelsCombo.popup.close()
                             audio.baseOutputChannel = modelData.baseChannel
                             audio.numOutputChannels = modelData.numChannels
-                            audio.validateDevices()
-                            audio.restartAudio()
+                            if (connected) {
+                                virtualstudio.triggerReconnect(false);
+                            } else {
+                                audio.validateDevices()
+                                audio.restartAudio()
+                            }
                         }
                     }
                 }
@@ -264,6 +279,7 @@ Rectangle {
 
             Button {
                 id: testOutputAudioButton
+                visible: showTestAudio
                 background: Rectangle {
                     radius: 6 * virtualstudio.uiScale
                     color: testOutputAudioButton.down ? buttonPressedColour : (testOutputAudioButton.hovered ? buttonHoverColour : buttonColour)
@@ -285,7 +301,7 @@ Rectangle {
 
             Rectangle {
                 id: divider1
-                anchors.top: testOutputAudioButton.bottom
+                anchors.top: showTestAudio ? testOutputAudioButton.bottom : outputChannelsCombo.bottom
                 anchors.topMargin: 24 * virtualstudio.uiScale
                 width: parent.width - x - (16 * virtualstudio.uiScale); height: 2 * virtualstudio.uiScale
                 color: "#E0E0E0"
@@ -295,9 +311,10 @@ Rectangle {
                 id: inputLabel
                 anchors.left: outputLabel.left
                 anchors.top: divider1.bottom
-                anchors.topMargin: 32 * virtualstudio.uiScale
+                anchors.topMargin: 24 * virtualstudio.uiScale
                 text: "Input Device"
                 font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
+                bottomPadding: 10 * virtualstudio.uiScale
                 color: textColour
             }
 
@@ -312,8 +329,8 @@ Rectangle {
 
             AppIcon {
                 id: microphoneIcon
-                anchors.left: outputLabel.left
-                anchors.verticalCenter: inputDeviceMeters.verticalCenter
+                anchors.left: inputLabel.left
+                anchors.top: inputLabel.bottom
                 width: 32 * virtualstudio.uiScale
                 height: 32 * virtualstudio.uiScale
                 icon.source: "mic.svg"
@@ -353,8 +370,12 @@ Rectangle {
                                         audio.outputDevice = modelData.text
                                     }
                                 }
-                                audio.validateDevices()
-                                audio.restartAudio()
+                                if (connected) {
+                                    virtualstudio.triggerReconnect(false);
+                                } else {
+                                    audio.validateDevices()
+                                    audio.restartAudio()
+                                }
                             }
                         }
                     }
@@ -376,14 +397,15 @@ Rectangle {
                 anchors.top: inputCombo.bottom
                 anchors.topMargin: 16 * virtualstudio.uiScale
                 height: 24 * virtualstudio.uiScale
-                model: audio.inputMeterLevels
+                model: showMeters ? audio.inputMeterLevels : [0, 0]
                 clipped: audio.inputClipped
+                visible: showMeters
                 enabled: audio.audioReady && !Boolean(audio.devicesError)
             }
 
             VolumeSlider {
                 id: inputSlider
-                anchors.left: inputDeviceMeters.left
+                anchors.left: inputCombo.left
                 anchors.right: parent.right
                 anchors.rightMargin: rightMargin * virtualstudio.uiScale
                 anchors.top: inputDeviceMeters.bottom
@@ -393,6 +415,7 @@ Rectangle {
                 tooltipText: "How loudly other participants hear you"
                 showLabel: false
                 sliderEnabled: true
+                visible: showMeters
             }
 
             Button {
@@ -408,7 +431,7 @@ Rectangle {
                 id: inputChannelsLabel
                 anchors.left: inputCombo.left
                 anchors.right: inputCombo.horizontalCenter
-                anchors.top: inputSlider.bottom
+                anchors.top: showMeters ? inputSlider.bottom : inputCombo.bottom
                 anchors.topMargin: 12 * virtualstudio.uiScale
                 textFormat: Text.RichText
                 text: "Input Channel(s)"
@@ -441,8 +464,12 @@ Rectangle {
                             inputChannelsCombo.popup.close()
                             audio.baseInputChannel = modelData.baseChannel
                             audio.numInputChannels = modelData.numChannels
-                            audio.validateDevices()
-                            audio.restartAudio()
+                            if (connected) {
+                                virtualstudio.triggerReconnect(false);
+                            } else {
+                                audio.validateDevices()
+                                audio.restartAudio()
+                            }
                         }
                     }
                 }
@@ -461,7 +488,7 @@ Rectangle {
                 anchors.left: inputCombo.horizontalCenter
                 anchors.right: inputCombo.right
                 anchors.rightMargin: 8 * virtualstudio.uiScale
-                anchors.top: inputSlider.bottom
+                anchors.top: showMeters ? inputSlider.bottom : inputCombo.bottom
                 anchors.topMargin: 12 * virtualstudio.uiScale
                 textFormat: Text.RichText
                 text: "Mono / Stereo"
@@ -493,8 +520,12 @@ Rectangle {
                             inputMixModeCombo.currentIndex = index
                             inputMixModeCombo.popup.close()
                             audio.inputMixMode = audio.inputMixModeComboModel[index].value
-                            audio.validateDevices();
-                            audio.restartAudio()
+                            if (connected) {
+                                virtualstudio.triggerReconnect(false);
+                            } else {
+                                audio.validateDevices()
+                                audio.restartAudio()
+                            }
                         }
                     }
                 }
@@ -605,13 +636,14 @@ Rectangle {
                 text: "Output Volume"
                 font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
                 wrapMode: Text.WordWrap
+                bottomPadding: 10 * virtualstudio.uiScale
                 color: textColour
             }
 
             AppIcon {
                 id: jackHeadphonesIcon
                 anchors.left: jackOutputLabel.left
-                anchors.verticalCenter: jackOutputVolumeSlider.verticalCenter
+                anchors.top: jackOutputLabel.bottom
                 width: 28 * virtualstudio.uiScale
                 height: 28 * virtualstudio.uiScale
                 icon.source: "headphones.svg"
@@ -624,7 +656,7 @@ Rectangle {
                 anchors.rightMargin: rightMargin * virtualstudio.uiScale
                 anchors.verticalCenter: jackOutputLabel.verticalCenter
                 height: 24 * virtualstudio.uiScale
-                model: audio.outputMeterLevels
+                model: showMeters ? audio.outputMeterLevels : [0, 0]
                 clipped: audio.outputClipped
                 enabled: audio.audioReady && !Boolean(audio.devicesError)
             }
@@ -673,13 +705,14 @@ Rectangle {
                 text: "Input Volume"
                 font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
                 wrapMode: Text.WordWrap
+                bottomPadding: 10 * virtualstudio.uiScale
                 color: textColour
             }
 
             AppIcon {
                 id: jackMicrophoneIcon
                 anchors.left: jackInputLabel.left
-                anchors.verticalCenter: jackInputVolumeSlider.verticalCenter
+                anchors.top: jackInputLabel.bottom
                 width: 32 * virtualstudio.uiScale
                 height: 32 * virtualstudio.uiScale
                 icon.source: "mic.svg"
@@ -692,7 +725,7 @@ Rectangle {
                 anchors.rightMargin: rightMargin * virtualstudio.uiScale
                 anchors.verticalCenter: jackInputLabel.verticalCenter
                 height: 24 * virtualstudio.uiScale
-                model: audio.inputMeterLevels
+                model: showMeters ? audio.inputMeterLevels : [0, 0]
                 clipped: audio.inputClipped
                 enabled: audio.audioReady && !Boolean(audio.devicesError)
             }
index 47f97d57e821ebd6d7f6b79510dcd07c864de800..7937ec293498a9652cfd84d237d63cbe9bff79a8 100644 (file)
@@ -35,457 +35,48 @@ Rectangle {
 
     property string linkText: virtualstudio.darkMode ? "#8B8D8D" : "#272525"
 
-    function getCurrentInputDeviceIndex () {
-        if (audio.inputDevice === "") {
-            return audio.inputComboModel.findIndex(elem => elem.type === "element");
-        }
-
-        let idx = audio.inputComboModel.findIndex(elem => elem.type === "element" && elem.text === audio.inputDevice);
-        if (idx < 0) {
-            idx = audio.inputComboModel.findIndex(elem => elem.type === "element");
-        }
-
-        return idx;
-    }
-
-    function getCurrentOutputDeviceIndex() {
-        if (audio.outputDevice === "") {
-            return audio.outputComboModel.findIndex(elem => elem.type === "element");
-        }
-
-        let idx = audio.outputComboModel.findIndex(elem => elem.type === "element" && elem.text === audio.outputDevice);
-        if (idx < 0) {
-            idx = audio.outputComboModel.findIndex(elem => elem.type === "element");
-        }
-
-        return idx;
-    }
-
     MouseArea {
         anchors.fill: parent
         propagateComposedEvents: false
     }
 
     Rectangle {
-        width: parent.width; height: 360
-        anchors.verticalCenter: parent.verticalCenter
+        id: audioSettingsView
+        width: parent.width;
+        height: parent.height;
         color: backgroundColour
         radius: 6 * virtualstudio.uiScale
 
-        Item {
-            id: usingRtAudio
-            anchors.top: parent.top
-            anchors.topMargin: 24 * virtualstudio.uiScale
-            anchors.bottom: parent.bottom
-            anchors.left: parent.left
-            anchors.leftMargin: 24 * virtualstudio.uiScale
-            anchors.right: parent.right
-
-            Rectangle {
-                id: leftSpacer
-                x: 0; y: 0
-                width: 144 * virtualstudio.uiScale
-                height: 0
-                color: "transparent"
-            }
-
-            DeviceRefreshButton {
-                id: refreshButton
-                y: 0;
-                x: parent.width - (144 + rightMargin) * virtualstudio.uiScale;
-                enabled: !audio.scanningDevices
-                onDeviceRefresh: function () {
-                    virtualstudio.triggerReconnect(true);
-                }
-            }
-
-            Text {
-                text: "Scanning Devices"
-                y: 0;
-                anchors.right: refreshButton.left;
-                anchors.rightMargin: 16 * virtualstudio.uiScale;
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-                visible: audio.scanningDevices
-            }
-
-            Text {
-                id: outputLabel
-                x: 0;
-                anchors.top: refreshButton.bottom
-                anchors.topMargin: 24 * virtualstudio.uiScale
-                text: "Output Device"
-                font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
-
-            InfoTooltip {
-                id: outputHelpIcon
-                anchors.left: outputLabel.right
-                anchors.bottom: outputLabel.top
-                anchors.bottomMargin: -8 * virtualstudio.uiScale
-                size: 16 * virtualstudio.uiScale
-                content: qsTr("How you'll hear the studio audio")
-            }
-
-            AppIcon {
-                id: headphonesIcon
-                anchors.left: outputLabel.left
-                anchors.top: outputLabel.bottom
-                anchors.topMargin: bottomToolTipMargin * virtualstudio.uiScale
-                width: 28 * virtualstudio.uiScale
-                height: 28 * virtualstudio.uiScale
-                icon.source: "headphones.svg"
-            }
-
-            ComboBox {
-                id: outputCombo
-                anchors.left: leftSpacer.right
-                anchors.verticalCenter: outputLabel.verticalCenter
-                anchors.rightMargin: rightMargin * virtualstudio.uiScale
-                width: parent.width - leftSpacer.width - rightMargin * virtualstudio.uiScale
-                enabled: virtualstudio.connectionState == "Connected"
-                model: audio.outputComboModel
-                currentIndex: getCurrentOutputDeviceIndex()
-                delegate: ItemDelegate {
-                    required property var modelData
-                    required property int index
-
-                    leftPadding: 0
-
-                    width: parent.width
-                    contentItem: Text {
-                        leftPadding: modelData.type === "element" && outputCombo.model.filter(it => it.type === "header").length > 0 ? 24 : 12
-                        text: modelData.text || ""
-                        font.bold: modelData.type === "header"
-                    }
-                    highlighted: outputCombo.highlightedIndex === index
-                    MouseArea {
-                        anchors.fill: parent
-                        onClicked: {
-                            if (modelData.type == "element") {
-                                outputCombo.currentIndex = index
-                                outputCombo.popup.close()
-                                audio.outputDevice = modelData.text
-                                if (modelData.category.startsWith("Low-Latency")) {
-                                    let inputComboIdx = inputCombo.model.findIndex(it => it.category.startsWith("Low-Latency") && it.text === modelData.text);
-                                    if (inputComboIdx !== null && inputComboIdx !== undefined) {
-                                        inputCombo.currentIndex = inputComboIdx;
-                                        audio.inputDevice = modelData.text
-                                    }
-                                }
-                                virtualstudio.triggerReconnect(false);
-                            }
-                        }
-                    }
-                }
-                contentItem: Text {
-                    leftPadding: 12
-                    font: outputCombo.font
-                    horizontalAlignment: Text.AlignHLeft
-                    verticalAlignment: Text.AlignVCenter
-                    elide: Text.ElideRight
-                    text: outputCombo.model[outputCombo.currentIndex] && outputCombo.model[outputCombo.currentIndex].text ? outputCombo.model[outputCombo.currentIndex].text : ""
-                }
-            }
-
-            Text {
-                id: outputChannelsLabel
-                anchors.left: outputCombo.left
-                anchors.right: outputCombo.horizontalCenter
-                anchors.top: outputCombo.bottom
-                anchors.topMargin: 12 * virtualstudio.uiScale
-                textFormat: Text.RichText
-                text: "Output Channel(s)"
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
-
-            ComboBox {
-                id: outputChannelsCombo
-                anchors.left: outputCombo.left
-                anchors.right: outputCombo.horizontalCenter
-                anchors.rightMargin: 8 * virtualstudio.uiScale
-                anchors.top: outputChannelsLabel.bottom
-                anchors.topMargin: 4 * virtualstudio.uiScale
-                enabled: audio.outputChannelsComboModel.length > 1 && virtualstudio.connectionState == "Connected"
-                model: audio.outputChannelsComboModel
-                currentIndex: (() => {
-                    let idx = audio.outputChannelsComboModel.findIndex(elem => elem.baseChannel === audio.baseOutputChannel
-                        && elem.numChannels === audio.numOutputChannels);
-                    if (idx < 0) {
-                        idx = 0;
-                    }
-                    return idx;
-                })()
-                delegate: ItemDelegate {
-                    required property var modelData
-                    required property int index
-                    width: parent.width
-                    contentItem: Text {
-                        text: modelData.label
-                    }
-                    highlighted: outputChannelsCombo.highlightedIndex === index
-                    MouseArea {
-                        anchors.fill: parent
-                        onClicked: {
-                            outputChannelsCombo.currentIndex = index
-                            outputChannelsCombo.popup.close()
-                            audio.baseOutputChannel = modelData.baseChannel
-                            audio.numOutputChannels = modelData.numChannels
-                            virtualstudio.triggerReconnect(false);
-                        }
-                    }
-                }
-                contentItem: Text {
-                    leftPadding: 12
-                    font: inputCombo.font
-                    horizontalAlignment: Text.AlignHLeft
-                    verticalAlignment: Text.AlignVCenter
-                    elide: Text.ElideRight
-                    text: outputChannelsCombo.model[outputChannelsCombo.currentIndex].label || ""
-                }
-            }
-
-            Text {
-                id: inputLabel
-                anchors.left: outputLabel.left
-                anchors.top: outputChannelsCombo.bottom
-                anchors.topMargin: 32 * virtualstudio.uiScale
-                text: "Input Device"
-                font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
-
-            InfoTooltip {
-                id: inputHelpIcon
-                anchors.left: inputLabel.right
-                anchors.bottom: inputLabel.top
-                anchors.bottomMargin: -8 * virtualstudio.uiScale
-                size: 16 * virtualstudio.uiScale
-                content: qsTr("Audio sent to the studio (microphone, instrument, mixer, etc.)")
-            }
-
-            AppIcon {
-                id: microphoneIcon
-                anchors.left: inputLabel.left
-                anchors.top: inputLabel.bottom
-                anchors.topMargin: bottomToolTipMargin * virtualstudio.uiScale
-                width: 32 * virtualstudio.uiScale
-                height: 32 * virtualstudio.uiScale
-                icon.source: "mic.svg"
-            }
-
-            ComboBox {
-                id: inputCombo
-                model: audio.inputComboModel
-                currentIndex: getCurrentInputDeviceIndex()
-                anchors.left: outputCombo.left
-                anchors.right: outputCombo.right
-                anchors.verticalCenter: inputLabel.verticalCenter
-                enabled: virtualstudio.connectionState == "Connected"
-                delegate: ItemDelegate {
-                    required property var modelData
-                    required property int index
-
-                    leftPadding: 0
-
-                    width: parent.width
-                    contentItem: Text {
-                        leftPadding: modelData.type === "element" && inputCombo.model.filter(it => it.type === "header").length > 0 ? 24 : 12
-                        text: modelData.text || ""
-                        font.bold: modelData.type === "header"
-                    }
-                    highlighted: inputCombo.highlightedIndex === index
-                    MouseArea {
-                        anchors.fill: parent
-                        onClicked: {
-                            if (modelData.type == "element") {
-                                inputCombo.currentIndex = index
-                                inputCombo.popup.close()
-                                audio.inputDevice = modelData.text
-                                if (modelData.category.startsWith("Low-Latency")) {
-                                    let outputComboIdx = outputCombo.model.findIndex(it => it.category.startsWith("Low-Latency") && it.text === modelData.text);
-                                    if (outputComboIdx !== null && outputComboIdx !== undefined) {
-                                        outputCombo.currentIndex = outputComboIdx;
-                                        audio.outputDevice = modelData.text
-                                    }
-                                }
-                                virtualstudio.triggerReconnect(false);
-                            }
-                        }
-                    }
-                }
-                contentItem: Text {
-                    leftPadding: 12
-                    font: inputCombo.font
-                    horizontalAlignment: Text.AlignHLeft
-                    verticalAlignment: Text.AlignVCenter
-                    elide: Text.ElideRight
-                    text: inputCombo.model[inputCombo.currentIndex] && inputCombo.model[inputCombo.currentIndex].text ? inputCombo.model[inputCombo.currentIndex].text : ""
-                }
-            }
-
-            Text {
-                id: inputChannelsLabel
-                anchors.left: inputCombo.left
-                anchors.right: inputCombo.horizontalCenter
-                anchors.top: inputCombo.bottom
-                anchors.topMargin: 12 * virtualstudio.uiScale
-                textFormat: Text.RichText
-                text: "Input Channel(s)"
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
-
-            ComboBox {
-                id: inputChannelsCombo
-                anchors.left: inputCombo.left
-                anchors.right: inputCombo.horizontalCenter
-                anchors.rightMargin: 8 * virtualstudio.uiScale
-                anchors.top: inputChannelsLabel.bottom
-                anchors.topMargin: 4 * virtualstudio.uiScale
-                enabled: audio.inputChannelsComboModel.length > 1 && virtualstudio.connectionState == "Connected"
-                model: audio.inputChannelsComboModel
-                currentIndex: (() => {
-                    let idx = audio.inputChannelsComboModel.findIndex(elem => elem.baseChannel === audio.baseInputChannel
-                        && elem.numChannels === audio.numInputChannels);
-                    if (idx < 0) {
-                        idx = 0;
-                    }
-                    return idx;
-                })()
-                delegate: ItemDelegate {
-                    required property var modelData
-                    required property int index
-                    width: parent.width
-                    contentItem: Text {
-                        text: modelData.label
-                    }
-                    highlighted: inputChannelsCombo.highlightedIndex === index
-                    MouseArea {
-                        anchors.fill: parent
-                        onClicked: {
-                            inputChannelsCombo.currentIndex = index
-                            inputChannelsCombo.popup.close()
-                            audio.baseInputChannel = modelData.baseChannel
-                            audio.numInputChannels = modelData.numChannels
-                            virtualstudio.triggerReconnect(false);
-                        }
-                    }
-                }
-                contentItem: Text {
-                    leftPadding: 12
-                    font: inputCombo.font
-                    horizontalAlignment: Text.AlignHLeft
-                    verticalAlignment: Text.AlignVCenter
-                    elide: Text.ElideRight
-                    text: inputChannelsCombo.model[inputChannelsCombo.currentIndex].label || ""
-                }
-            }
-
-            Text {
-                id: inputMixModeLabel
-                anchors.left: inputCombo.horizontalCenter
-                anchors.right: inputCombo.right
-                anchors.rightMargin: 8 * virtualstudio.uiScale
-                anchors.top: inputCombo.bottom
-                anchors.topMargin: 12 * virtualstudio.uiScale
-                textFormat: Text.RichText
-                text: "Mono / Stereo"
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
-
-            ComboBox {
-                id: inputMixModeCombo
-                anchors.left: inputCombo.horizontalCenter
-                anchors.right: inputCombo.right
-                anchors.rightMargin: 8 * virtualstudio.uiScale
-                anchors.top: inputMixModeLabel.bottom
-                anchors.topMargin: 4 * virtualstudio.uiScale
-                enabled: audio.inputMixModeComboModel.length > 1 && virtualstudio.connectionState == "Connected"
-                model: audio.inputMixModeComboModel
-                currentIndex: (() => {
-                    let idx = audio.inputMixModeComboModel.findIndex(elem => elem.value === audio.inputMixMode);
-                    if (idx < 0) {
-                        idx = 0;
-                    }
-                    return idx;
-                })()
-                delegate: ItemDelegate {
-                    required property var modelData
-                    required property int index
-                    width: parent.width
-                    contentItem: Text {
-                        text: modelData.label
-                    }
-                    highlighted: inputMixModeCombo.highlightedIndex === index
-                    MouseArea {
-                        anchors.fill: parent
-                        onClicked: {
-                            inputMixModeCombo.currentIndex = index
-                            inputMixModeCombo.popup.close()
-                            audio.inputMixMode = audio.inputMixModeComboModel[index].value
-                            virtualstudio.triggerReconnect(false);
-                        }
-                    }
-                }
-                contentItem: Text {
-                    leftPadding: 12
-                    font: inputCombo.font
-                    horizontalAlignment: Text.AlignHLeft
-                    verticalAlignment: Text.AlignVCenter
-                    elide: Text.ElideRight
-                    text: inputMixModeCombo.model[inputMixModeCombo.currentIndex].label || ""
-                }
-            }
-
-            Text {
-                id: inputChannelHelpMessage
-                anchors.left: inputChannelsCombo.left
-                anchors.leftMargin: 2 * virtualstudio.uiScale
-                anchors.right: inputChannelsCombo.right
-                anchors.top: inputChannelsCombo.bottom
-                anchors.topMargin: 8 * virtualstudio.uiScale
-                textFormat: Text.RichText
-                wrapMode: Text.WordWrap
-                text: audio.inputChannelsComboModel.length > 1 ? "Choose up to 2 channels" : "Only 1 channel available"
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
+        DeviceRefreshButton {
+            id: refreshButton
+            anchors.top: parent.top;
+            anchors.topMargin: 16 * virtualstudio.uiScale;
+            anchors.right: parent.right;
+            anchors.rightMargin: 16 * virtualstudio.uiScale;
+            enabled: !audio.scanningDevices
+            onDeviceRefresh: function () {
+                virtualstudio.triggerReconnect(true);
             }
+        }
 
-            Text {
-                id: inputMixModeHelpMessage
-                anchors.left: inputMixModeCombo.left
-                anchors.leftMargin: 2 * virtualstudio.uiScale
-                anchors.right: inputMixModeCombo.right
-                anchors.top: inputMixModeCombo.bottom
-                anchors.topMargin: 8 * virtualstudio.uiScale
-                textFormat: Text.RichText
-                wrapMode: Text.WordWrap
-                text: (() => {
-                    if (audio.inputMixMode === 2) {
-                        return "Treat the channels as Left and Right signals, coming through each speaker separately.";
-                    } else if (audio.inputMixMode === 3) {
-                        return "Combine the channels into one central channel coming through both speakers.";
-                    } else if (audio.inputMixMode === 1) {
-                        return "Send a single channel of audio";
-                    } else {
-                        return "";
-                    }
-                })()
-                font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
-                color: textColour
-            }
+        Text {
+            text: "Restarting Audio"
+            anchors.verticalCenter: refreshButton.verticalCenter
+            anchors.right: refreshButton.left;
+            anchors.rightMargin: 16 * virtualstudio.uiScale;
+            font { family: "Poppins"; pixelSize: fontTiny * virtualstudio.fontScale * virtualstudio.uiScale }
+            color: textColour
+            visible: audio.scanningDevices
+        }
 
-            DeviceWarning {
-                id: deviceWarning
-                anchors.left: inputCombo.left
-                anchors.top: inputMixModeHelpMessage.bottom
-                anchors.topMargin: 48 * virtualstudio.uiScale
-                visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
-            }
+        AudioSettings {
+            id: audioSettings
+            showMeters: false
+            showTestAudio: false
+            connected: true
+            height: 300 * virtualstudio.uiScale
+            anchors.top: refreshButton.bottom;
+            anchors.topMargin: 16 * virtualstudio.uiScale;
         }
     }
 
@@ -512,4 +103,13 @@ Rectangle {
             color: textColour
         }
     }
+
+    DeviceWarning {
+        id: deviceWarning
+        anchors.left: backButton.right
+        anchors.leftMargin: 24 * virtualstudio.uiScale
+        anchors.bottom: parent.bottom
+        anchors.bottomMargin: 16 * virtualstudio.uiScale;
+        visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+    }
 }
index 2d5199068f7bcef6dada4429fc277a32b76ac5e0..00d6798c077de7b179f4f12b0ce4edfeb74dbfb3 100644 (file)
@@ -75,7 +75,6 @@ Item {
 
     DeviceControlsGroup {
         id: deviceControlsGroup
-        showMinified: false
         anchors.bottom: footer.top
     }
 
@@ -83,13 +82,4 @@ Item {
         id: footer
         anchors.bottom: parent.bottom
     }
-
-    Connections {
-        target: virtualstudio
-
-        // self-managed servers do not support minified controls so keep it full size
-        function onCollapseDeviceControlsChanged(collapseDeviceControls) {
-            deviceControlsGroup.showMinified = virtualstudio.currentStudio.isManaged && collapseDeviceControls;
-        }
-    }
 }
index 4a84c2b33812a38b652f56ed538f5d3b6d668211..38e17458b3b47f3451bfeee82f1ee6979d4e387f 100644 (file)
@@ -22,7 +22,6 @@ Item {
 
             anchors.left: parent.left
             anchors.leftMargin: 8 * virtualstudio.uiScale
-            anchors.verticalCenter: parent.verticalCenter
 
             background: Rectangle {
                 color: isInput ? (audio.inputMuted ? muteButtonMutedColor : buttonColour) : "transparent"
@@ -71,7 +70,7 @@ Item {
 
         ColumnLayout {
             anchors.fill: parent
-            spacing: 2
+            spacing: 2 * virtualstudio.uiScale
 
             VolumeSlider {
                 Layout.fillWidth: true
@@ -93,7 +92,7 @@ Item {
 
         ColumnLayout {
             anchors.fill: parent
-            spacing: 2
+            spacing: 4 * virtualstudio.uiScale
 
             VolumeSlider {
                 Layout.fillWidth: true
@@ -115,15 +114,16 @@ Item {
 
     ColumnLayout {
         anchors.fill: parent
-        spacing: 2
+        spacing: 5 * virtualstudio.uiScale
 
         Item {
-            Layout.preferredHeight: minifiedHeight
+            Layout.topMargin: 5 * virtualstudio.uiScale
+            Layout.preferredHeight: 30 * virtualstudio.uiScale
             Layout.fillWidth: true
 
             RowLayout {
                 anchors.fill: parent
-                spacing: 8
+                spacing: 8 * virtualstudio.uiScale
 
                 Item {
                     Layout.fillHeight: true
@@ -132,7 +132,6 @@ Item {
                     Loader {
                         id: typeIconIndicator
                         anchors.left: parent.left
-                        anchors.verticalCenter: parent.verticalCenter
                         sourceComponent: controlIndicator
                     }
 
@@ -140,7 +139,6 @@ Item {
                         id: label
                         anchors.left: parent.left
                         anchors.leftMargin: 36 * virtualstudio.uiScale
-                        anchors.verticalCenter: parent.verticalCenter
 
                         text: isInput ? "Input" : "Output"
                         font { family: "Poppins"; weight: Font.Bold; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
@@ -159,12 +157,11 @@ Item {
                 Item {
                     Layout.fillHeight: true
                     Layout.fillWidth: true
-                    Layout.preferredWidth: 200
+                    Layout.preferredWidth: 200 * virtualstudio.uiScale
 
                     Meter {
                         anchors.fill: parent
-                        anchors.topMargin: 5
-                        anchors.rightMargin: 8
+                        anchors.rightMargin: 8 * virtualstudio.uiScale
                         model: isInput ? audio.inputMeterLevels : audio.outputMeterLevels
                         clipped: isInput ? audio.inputClipped : audio.outputClipped
                         enabled: true
@@ -174,25 +171,23 @@ Item {
         }
 
         Item {
-            Layout.preferredHeight: 42
-            Layout.minimumHeight: 42
-            Layout.maximumHeight: 42
             Layout.fillWidth: true
+            Layout.fillHeight: true
+            Layout.bottomMargin: 5 * virtualstudio.uiScale
 
             RowLayout {
                 anchors.fill: parent
-                spacing: 2
+                spacing: 8 * virtualstudio.uiScale
 
                 Item {
                     Layout.fillHeight: true
                     Layout.fillWidth: true
-                    Layout.alignment: Qt.AlignVCenter
                     Layout.leftMargin: 8 * virtualstudio.uiScale
                     Layout.rightMargin: 8 * virtualstudio.uiScale
 
                     Loader {
                         anchors.fill: parent
-                        anchors.verticalCenter: parent.verticalCenter
+                        anchors.top: parent.top
                         sourceComponent: isInput ? inputControls : outputControls
                     }
                 }
index dc5f71649674e273198c46a55ca3579b9326e62c..8b5b760893e1e92bb14b6ae5fc6ecb39fa56637d 100644 (file)
@@ -3,19 +3,24 @@ import QtQuick.Controls
 import QtQuick.Layouts
 
 Rectangle {
-    required property bool showMinified
-
     property string disabledButtonText: "#D3D4D4"
     property string saveButtonText: "#DB0A0A"
-    property int minifiedHeight: 36
-    property int fullHeight: 84
+    property int fullHeight: 88 * virtualstudio.uiScale
+    property int minimumHeight: 48 * virtualstudio.uiScale
+
+    property bool isUsingRtAudio: audio.audioBackend == "RtAudio"
+    property bool isReady: virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+    property bool showDeviceControls: getShowDeviceControls()
 
     id: deviceControlsGroup
     width: parent.width
-    height: showMinified ? minifiedHeight : fullHeight
+    height: isReady ? (showDeviceControls ? fullHeight : (feedbackDetectedModal.visible ? minimumHeight : 0)) : minimumHeight;
     color: backgroundColour
 
-    property bool showDeviceControls: studioStatus === "Ready"
+    function getShowDeviceControls () {
+        // self-managed servers do not support minified controls so keep it full size
+        return !virtualstudio.currentStudio.isManaged || (!virtualstudio.collapseDeviceControls && isReady);
+    }
 
     MouseArea {
         anchors.fill: parent
@@ -31,7 +36,7 @@ Rectangle {
         Item {
             Layout.fillHeight: true
             Layout.fillWidth: true
-            visible: !showDeviceControls
+            visible: !isReady
 
             Button {
                 id: backButton
@@ -75,7 +80,7 @@ Rectangle {
 
         Item {
             Layout.fillHeight: true
-            Layout.preferredWidth: 48
+            Layout.preferredWidth: 48 * virtualstudio.uiScale
             visible: showDeviceControls
 
             ColumnLayout {
@@ -83,21 +88,53 @@ Rectangle {
                 spacing: 2
 
                 Item {
-                    Layout.preferredHeight: 20
-                    Layout.preferredWidth: 40
-                    Layout.alignment: Qt.AlignHCenter
+                    Layout.preferredHeight: 24 * virtualstudio.uiScale
+                    Layout.preferredWidth: 24 * virtualstudio.uiScale
+                    Layout.topMargin: 2 * virtualstudio.uiScale
+                    Layout.rightMargin: 2 * virtualstudio.uiScale
+                    Layout.alignment: Qt.AlignRight | Qt.AlignTop
+
+                    Button {
+                        id: closeDeviceControlsButton
+                        visible: virtualstudio.currentStudio.isManaged
+                        width: 24 * virtualstudio.uiScale
+                        height: 24 * virtualstudio.uiScale
+                        background: Rectangle {
+                            color: backgroundColour
+                        }
+                        anchors.top: parent.top
+                        anchors.right: parent.right
+                        onClicked: {
+                            virtualstudio.collapseDeviceControls = true;
+                        }
+
+                        AppIcon {
+                            id: closeDeviceControlsIcon
+                            anchors { verticalCenter: parent.verticalCenter; horizontalCenter: parent.horizontalCenter }
+                            width: 24 * virtualstudio.uiScale
+                            height: 24 * virtualstudio.uiScale
+                            color: closeDeviceControlsButton.hovered ? textColour : browserButtonHoverColour
+                            icon.source: "close.svg"
+                            onClicked: {
+                                virtualstudio.collapseDeviceControls = true;
+                            }
+                        }
+                    }
                 }
 
                 Item {
-                    Layout.preferredHeight: 48
-                    Layout.preferredWidth: 40
-                    Layout.alignment: Qt.AlignHCenter
-                    visible: !showMinified
+                    Layout.preferredWidth: 40 * virtualstudio.uiScale
+                    Layout.preferredHeight: 64 * virtualstudio.uiScale
+                    Layout.bottomMargin: 5 * virtualstudio.uiScale
+                    Layout.topMargin: 2 * virtualstudio.uiScale
+                    Layout.rightMargin: 2 * virtualstudio.uiScale
+                    Layout.alignment: Qt.AlignHCenter | Qt.AlignTop
 
                     Button {
                         id: changeDevicesButton
-                        width: 36
-                        height: 36
+                        visible: isUsingRtAudio
+                        width: 36 * virtualstudio.uiScale
+                        height: 36 * virtualstudio.uiScale
                         anchors.top: parent.top
                         anchors.horizontalCenter: parent.horizontalCenter
                         background: Rectangle {
@@ -187,7 +224,7 @@ Rectangle {
                     height: 32 * virtualstudio.uiScale
                     icon.source: "warning.svg"
                     color: "#F21B1B"
-                    visible: !showMinified
+                    visible: showDeviceControls
                 }
 
                 AppIcon {
@@ -198,7 +235,7 @@ Rectangle {
                     width: 24 * virtualstudio.uiScale
                     icon.source: "warning.svg"
                     color: "#F21B1B"
-                    visible: showMinified
+                    visible: !showDeviceControls
                 }
 
                 Text {
@@ -213,7 +250,7 @@ Rectangle {
                     color: textColour
                     elide: Text.ElideRight
                     wrapMode: Text.WordWrap
-                    visible: !showMinified
+                    visible: showDeviceControls
                 }
 
                 Text {
@@ -228,7 +265,7 @@ Rectangle {
                     color: textColour
                     elide: Text.ElideRight
                     wrapMode: Text.WordWrap
-                    visible: !showMinified
+                    visible: showDeviceControls
                 }
 
                 Text {
@@ -242,7 +279,7 @@ Rectangle {
                     color: textColour
                     elide: Text.ElideRight
                     wrapMode: Text.WordWrap
-                    visible: showMinified
+                    visible: !showDeviceControls
                 }
 
                 Text {
@@ -258,7 +295,7 @@ Rectangle {
                     color: textColour
                     elide: Text.ElideRight
                     wrapMode: Text.WordWrap
-                    visible: !showMinified
+                    visible: showDeviceControls
                 }
 
                 Button {
@@ -280,13 +317,13 @@ Rectangle {
                     Text {
                         text: "Ok"
                         font.family: "Poppins"
-                        font.pixelSize: showMinified ? fontTiny * virtualstudio.fontScale * virtualstudio.uiScale : fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+                        font.pixelSize: showDeviceControls ? fontSmall * virtualstudio.fontScale * virtualstudio.uiScale : fontTiny * virtualstudio.fontScale * virtualstudio.uiScale
                         font.weight: Font.Bold
                         color: !Boolean(audio.devicesError) && audio.backendAvailable ? saveButtonText : disabledButtonText
                         anchors.horizontalCenter: parent.horizontalCenter
                         anchors.verticalCenter: parent.verticalCenter
                     }
-                    visible: !showMinified
+                    visible: showDeviceControls
                 }
 
                 Button {
@@ -314,7 +351,7 @@ Rectangle {
                         anchors.horizontalCenter: parent.horizontalCenter
                         anchors.verticalCenter: parent.verticalCenter
                     }
-                    visible: showMinified
+                    visible: !showDeviceControls
                 }
             }
         }
@@ -328,5 +365,19 @@ Rectangle {
                 feedbackDetectedModal.visible = true;
             }
         }
+
+        function onCollapseDeviceControlsChanged(collapseDeviceControls) {
+            showDeviceControls = getShowDeviceControls()
+        }
+
+        function onCurrentStudioChanged(currentStudio) {
+            isReady = virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+            showDeviceControls = getShowDeviceControls()
+        }
+
+        function onConnectionStateChanged(connectionState) {
+            isReady = virtualstudio.currentStudio.id !== "" && virtualstudio.currentStudio.status == "Ready"
+            showDeviceControls = getShowDeviceControls()
+        }
     }
 }
\ No newline at end of file
index 1a62d52776c8632bbefc7a8c8c4a7fdb0fde45c3..3b65e3815dec4d9cf75de4360c060ae162dc76ad 100644 (file)
@@ -31,8 +31,7 @@ Item {
         id: devicesWarningTooltip
         anchors.left: warningOrErrorText.right
         anchors.leftMargin: 2 * virtualstudio.uiScale
-        anchors.bottom: warningOrErrorText.bottom
-        anchors.bottomMargin: 6 * virtualstudio.uiScale
+        anchors.top: devicesWarningIcon.top
         content: qsTr(audio.devicesError || audio.devicesWarning)
         iconColor: devicesWarningColour
         size: 16 * virtualstudio.uiScale
index eaeae147ad319c832edca86cf9489fbdb8acaa38..6c1528d05a28d02a3e33750921d7603908ba4419 100644 (file)
@@ -292,39 +292,18 @@ Item {
                     anchors.topMargin: 24 * virtualstudio.uiScale
 
                     Button {
-                        id: noUserFeedbackButton
-                        anchors.left: buttonsArea.left
-                        anchors.verticalCenter: parent.buttonsArea
-                        width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
-                        onClicked: () => {
-                            userFeedbackModal.close();
-                            rating = 0;
-                            serverId = "";
-                            messageBox.clear();
-                        }
-
-                        background: Rectangle {
-                            radius: 6 * virtualstudio.uiScale
-                            color: noUserFeedbackButton.down ? buttonPressedColour : (noUserFeedbackButton.hovered ? buttonHoverColour : buttonColour)
-                            border.width: 1
-                            border.color: noUserFeedbackButton.down ? buttonPressedStroke : (noUserFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
-                        }
-
-                        Text {
-                            text: "No thanks"
-                            font.family: "Poppins"
-                            font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
-                            anchors.horizontalCenter: parent.horizontalCenter
-                            anchors.verticalCenter: parent.verticalCenter
-                        }
-                    }
-
-                    Button {
-                        id: submitUserFeedbackButton
+                        id: userFeedbackButton
                         anchors.right: buttonsArea.right
+                        anchors.horizontalCenter: buttonsArea.horizontalCenter
                         anchors.verticalCenter: parent.buttonsArea
                         width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
                         onClicked: () => {
+                            if (rating === 0 && messageBox.text === "") {
+                                userFeedbackModal.close();
+                                serverId = "";
+                                messageBox.clear();
+                                return;
+                            }
                             virtualstudio.collectFeedbackSurvey(serverId, rating, messageBox.text);
                             submitted = true;
                             rating = 0;
@@ -336,13 +315,13 @@ Item {
 
                         background: Rectangle {
                             radius: 6 * virtualstudio.uiScale
-                            color: submitUserFeedbackButton.down ? buttonPressedColour : (submitUserFeedbackButton.hovered ? buttonHoverColour : buttonColour)
+                            color: userFeedbackButton.down ? buttonPressedColour : (userFeedbackButton.hovered ? buttonHoverColour : buttonColour)
                             border.width: 1
-                            border.color: submitUserFeedbackButton.down ? buttonPressedStroke : (submitUserFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
+                            border.color: userFeedbackButton.down ? buttonPressedStroke : (userFeedbackButton.hovered ? buttonHoverStroke : buttonStroke)
                         }
 
                         Text {
-                            text: "Submit"
+                            text: (rating === 0 && messageBox.text === "") ? "Dismiss" : "Submit"
                             font.family: "Poppins"
                             font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
                             font.weight: Font.Bold
index 0831e01ac6729f1498867aae30ce18597cfcc356..b970e917977d54490af1ae6217a31286bca4e210 100644 (file)
@@ -70,15 +70,6 @@ Item {
         AudioSettings {
             id: audioSettings
         }
-
-        DeviceWarning {
-            id: deviceWarning
-            anchors.left: parent.left
-            anchors.leftMargin: 168 * virtualstudio.uiScale
-            anchors.bottom: parent.bottom
-            anchors.bottomMargin: 48 * virtualstudio.uiScale
-            visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
-        }
     }
 
     ToolBar {
@@ -281,6 +272,7 @@ Item {
                 width: parent.width
                 horizontalAlignment: Text.AlignHCenter
                 verticalAlignment: Text.AlignVCenter
+                bottomPadding: 5 * virtualstudio.uiScale
             }
         }
     }
@@ -296,7 +288,8 @@ Item {
 
         Slider {
             id: scaleSlider
-            x: 234 * virtualstudio.uiScale; y: 100 * virtualstudio.uiScale
+            x: 220 * virtualstudio.uiScale;
+            y: 100 * virtualstudio.uiScale
             width: backendCombo.width
             from: 1; to: 1.25; value: virtualstudio.uiScale
             onMoved: { virtualstudio.uiScale = value }
@@ -393,8 +386,10 @@ Item {
                 // switch mode
                 virtualstudio.toStandard();
             }
-            x: 234 * virtualstudio.uiScale; y: 100 * virtualstudio.uiScale
-            width: 216 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+            x: 220 * virtualstudio.uiScale;
+            y: 100 * virtualstudio.uiScale
+            width: 216 * virtualstudio.uiScale;
+            height: 30 * virtualstudio.uiScale
             Text {
                 text: virtualstudio.psiBuild ? "Switch to Standard Mode" : "Switch to Classic Mode"
                 font { family: "Poppins"; pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale }
@@ -413,13 +408,13 @@ Item {
 
         ComboBox {
             id: updateChannelCombo
-            x: 234 * virtualstudio.uiScale; y: modeButton.y + (48 * virtualstudio.uiScale)
+            x: 220 * virtualstudio.uiScale; y: modeButton.y + (48 * virtualstudio.uiScale)
             width: parent.width - x - (16 * virtualstudio.uiScale); height: 36 * virtualstudio.uiScale
             model: virtualstudio.updateChannelComboModel
             currentIndex: virtualstudio.updateChannel == "stable" ? 0 : 1
             onActivated: { virtualstudio.updateChannel = currentIndex == 0 ? "stable": "edge" }
             font.family: "Poppins"
-            visible: !virtualstudio.noUpdater
+            enabled: !virtualstudio.noUpdater
         }
 
         Text {
@@ -428,7 +423,6 @@ Item {
             text: "Update Channel"
             font { family: "Poppins"; pixelSize: fontMedium * virtualstudio.fontScale * virtualstudio.uiScale }
             color: textColour
-            visible: !virtualstudio.noUpdater
         }
 
         ComboBox {
@@ -440,7 +434,7 @@ Item {
                 audio.audioBackend = currentText
                 audio.restartAudio();
             }
-            x: 234 * virtualstudio.uiScale; y: updateChannelCombo.y + (48 * virtualstudio.uiScale)
+            x: 220 * virtualstudio.uiScale; y: updateChannelCombo.y + (48 * virtualstudio.uiScale)
             width: updateChannelCombo.width; height: updateChannelCombo.height
         }
 
@@ -455,7 +449,7 @@ Item {
 
         ComboBox {
             id: bufferCombo
-            x: 234 * virtualstudio.uiScale; y: backendCombo.y + (48 * virtualstudio.uiScale)
+            x: 220 * virtualstudio.uiScale; y: backendCombo.y + (48 * virtualstudio.uiScale)
             width: backendCombo.width; height: updateChannelCombo.height
             model: audio.bufferSizeComboModel
             currentIndex: getCurrentBufferSizeIndex()
@@ -764,6 +758,13 @@ Item {
                 color: Boolean(audio.devicesError) ? disabledButtonTextColour : textColour
             }
         }
+
+        DeviceWarning {
+            id: deviceWarning
+            x: (0.2 * window.width) + 16 * virtualstudio.uiScale
+            anchors.verticalCenter: parent.verticalCenter
+            visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+        }
     }
 
     Connections {
index dc045307901941b80517731d23c4629b0bc630be..16b566208d214b7395ad8a6b92b10cac7229c938 100644 (file)
@@ -13,6 +13,7 @@ Item {
     property int leftMargin: 48
     property int rightMargin: 16
 
+    property string strokeColor: virtualstudio.darkMode ? "#80827D7D" : "#34979797"
     property string textColour: virtualstudio.darkMode ? "#FAFBFB" : "#0F0D0D"
     property string buttonColour: virtualstudio.darkMode ? "#494646" : "#EAECEC"
     property string buttonHoverColour: virtualstudio.darkMode ? "#5B5858" : "#D3D4D4"  
@@ -37,7 +38,8 @@ Item {
 
         Text {
             id: pageTitle
-            x: 16 * virtualstudio.uiScale; y: 32 * virtualstudio.uiScale
+            x: 16 * virtualstudio.uiScale;
+            y: 16 * virtualstudio.uiScale
             text: "Choose your audio devices"
             font { family: "Poppins"; weight: Font.Bold; pixelSize: fontBig * virtualstudio.fontScale * virtualstudio.uiScale }
             color: textColour
@@ -66,108 +68,132 @@ Item {
             id: audioSettings
             width: parent.width
             anchors.top: pageTitle.bottom
-            anchors.topMargin: 24 * virtualstudio.uiScale
+            anchors.topMargin: 16 * virtualstudio.uiScale
         }
 
-        Button {
-            id: backButton
-            background: Rectangle {
-                radius: 6 * virtualstudio.uiScale
-                color: backButton.down ? buttonPressedColour : buttonColour
-                border.width: 1
-                border.color: backButton.down || backButton.hovered ? buttonPressedStroke : buttonStroke
-            }
-            onClicked: { virtualstudio.windowState = "browse"; virtualstudio.studioToJoin = ""; audio.stopAudio(); }
-            anchors.left: parent.left
-            anchors.leftMargin: 16 * virtualstudio.uiScale
-            anchors.bottomMargin: rightMargin * virtualstudio.uiScale
-            anchors.bottom: parent.bottom
-            width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
-            Text {
-                text: "Back"
-                font.family: "Poppins"
-                font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
-                color: textColour
-                anchors.horizontalCenter: parent.horizontalCenter
-                anchors.verticalCenter: parent.verticalCenter
-            }
+        Rectangle {
+            id: headerBorder
+            width: parent.width
+            height: 1
+            anchors.top: audioSettings.top
+            color: strokeColor
         }
 
-        DeviceWarning {
-            id: deviceWarning
-            anchors.left: backButton.right
-            anchors.leftMargin: 16 * virtualstudio.uiScale
-            anchors.verticalCenter: backButton.verticalCenter
-            visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
+        Rectangle {
+            id: footerBorder
+            width: parent.width
+            height: 1
+            anchors.top: audioSettings.bottom
+            color: strokeColor
         }
 
-        Button {
-            id: saveButton
-            background: Rectangle {
-                radius: 6 * virtualstudio.uiScale
-                color: saveButton.down ? saveButtonPressedColour : saveButtonBackgroundColour
-                border.width: 1
-                border.color: saveButton.down || saveButton.hovered ? saveButtonPressedStroke : saveButtonStroke
-            }
-            enabled: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady
-            onClicked: {
-                audio.stopAudio(true);
-                virtualstudio.windowState = "connected";
-                virtualstudio.saveSettings();
-                virtualstudio.joinStudio();
+        Rectangle {
+            property int footerHeight: (30 + (rightMargin * 2)) * virtualstudio.uiScale;
+            x: -1; y: parent.height - footerHeight;
+            width: parent.width; height: footerHeight;
+            border.color: "#33979797"
+            color: backgroundColour
+
+            Button {
+                id: backButton
+                background: Rectangle {
+                    radius: 6 * virtualstudio.uiScale
+                    color: backButton.down ? buttonPressedColour : buttonColour
+                    border.width: 1
+                    border.color: backButton.down || backButton.hovered ? buttonPressedStroke : buttonStroke
+                }
+                onClicked: { virtualstudio.windowState = "browse"; virtualstudio.studioToJoin = ""; audio.stopAudio(); }
+                anchors.left: parent.left
+                anchors.leftMargin: 16 * virtualstudio.uiScale
+                anchors.bottomMargin: rightMargin * virtualstudio.uiScale
+                anchors.bottom: parent.bottom
+                width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+                Text {
+                    text: "Back"
+                    font.family: "Poppins"
+                    font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+                    color: textColour
+                    anchors.horizontalCenter: parent.horizontalCenter
+                    anchors.verticalCenter: parent.verticalCenter
+                }
             }
-            anchors.right: parent.right
-            anchors.rightMargin: rightMargin * virtualstudio.uiScale
-            anchors.bottomMargin: rightMargin * virtualstudio.uiScale
-            anchors.bottom: parent.bottom
-            width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
-            Text {
-                text: "Connect to Studio"
-                font.family: "Poppins"
-                font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
-                font.weight: Font.Bold
-                color: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady ? saveButtonText : disabledButtonText
-                anchors.horizontalCenter: parent.horizontalCenter
-                anchors.verticalCenter: parent.verticalCenter
+
+            DeviceWarning {
+                id: deviceWarning
+                anchors.left: backButton.right
+                anchors.leftMargin: 16 * virtualstudio.uiScale
+                anchors.verticalCenter: backButton.verticalCenter
+                visible: Boolean(audio.devicesError) || Boolean(audio.devicesWarning)
             }
-        }
 
-        CheckBox {
-            id: showAgainCheckbox
-            checked: virtualstudio.showDeviceSetup
-            visible: audio.backendAvailable
-            text: qsTr("Ask again next time")
-            anchors.right: saveButton.left
-            anchors.rightMargin: 16 * virtualstudio.uiScale
-            anchors.verticalCenter: saveButton.verticalCenter
-            onClicked: { virtualstudio.showDeviceSetup = showAgainCheckbox.checkState == Qt.Checked }
-            indicator: Rectangle {
-                implicitWidth: 16 * virtualstudio.uiScale
-                implicitHeight: 16 * virtualstudio.uiScale
-                x: showAgainCheckbox.leftPadding
-                y: parent.height / 2 - height / 2
-                radius: 3 * virtualstudio.uiScale
-                border.color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
-
-                Rectangle {
-                    width: 10 * virtualstudio.uiScale
-                    height: 10 * virtualstudio.uiScale
-                    x: 3 * virtualstudio.uiScale
-                    y: 3 * virtualstudio.uiScale
-                    radius: 2 * virtualstudio.uiScale
-                    color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
-                    visible: showAgainCheckbox.checked
+            Button {
+                id: saveButton
+                background: Rectangle {
+                    radius: 6 * virtualstudio.uiScale
+                    color: saveButton.down ? saveButtonPressedColour : saveButtonBackgroundColour
+                    border.width: 1
+                    border.color: saveButton.down || saveButton.hovered ? saveButtonPressedStroke : saveButtonStroke
+                }
+                enabled: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady
+                onClicked: {
+                    audio.stopAudio(true);
+                    virtualstudio.windowState = "connected";
+                    virtualstudio.saveSettings();
+                    virtualstudio.joinStudio();
+                }
+                anchors.right: parent.right
+                anchors.rightMargin: rightMargin * virtualstudio.uiScale
+                anchors.bottomMargin: rightMargin * virtualstudio.uiScale
+                anchors.bottom: parent.bottom
+                width: 150 * virtualstudio.uiScale; height: 30 * virtualstudio.uiScale
+                Text {
+                    text: "Connect to Studio"
+                    font.family: "Poppins"
+                    font.pixelSize: fontSmall * virtualstudio.fontScale * virtualstudio.uiScale
+                    font.weight: Font.Bold
+                    color: !Boolean(audio.devicesError) && audio.backendAvailable && audio.audioReady ? saveButtonText : disabledButtonText
+                    anchors.horizontalCenter: parent.horizontalCenter
+                    anchors.verticalCenter: parent.verticalCenter
                 }
             }
 
-            contentItem: Text {
-                text: showAgainCheckbox.text
-                font.family: "Poppins"
-                font.pixelSize: 10 * virtualstudio.fontScale * virtualstudio.uiScale
-                anchors.horizontalCenter: parent.horizontalCenter
-                anchors.verticalCenter: parent.verticalCenter
-                leftPadding: showAgainCheckbox.indicator.width + showAgainCheckbox.spacing
-                color: textColour
+            CheckBox {
+                id: showAgainCheckbox
+                checked: virtualstudio.showDeviceSetup
+                visible: audio.backendAvailable
+                text: qsTr("Ask again next time")
+                anchors.right: saveButton.left
+                anchors.rightMargin: 16 * virtualstudio.uiScale
+                anchors.verticalCenter: saveButton.verticalCenter
+                onClicked: { virtualstudio.showDeviceSetup = showAgainCheckbox.checkState == Qt.Checked }
+                indicator: Rectangle {
+                    implicitWidth: 16 * virtualstudio.uiScale
+                    implicitHeight: 16 * virtualstudio.uiScale
+                    x: showAgainCheckbox.leftPadding
+                    y: parent.height / 2 - height / 2
+                    radius: 3 * virtualstudio.uiScale
+                    border.color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
+
+                    Rectangle {
+                        width: 10 * virtualstudio.uiScale
+                        height: 10 * virtualstudio.uiScale
+                        x: 3 * virtualstudio.uiScale
+                        y: 3 * virtualstudio.uiScale
+                        radius: 2 * virtualstudio.uiScale
+                        color: showAgainCheckbox.down || showAgainCheckbox.hovered ? checkboxPressedStroke : checkboxStroke
+                        visible: showAgainCheckbox.checked
+                    }
+                }
+
+                contentItem: Text {
+                    text: showAgainCheckbox.text
+                    font.family: "Poppins"
+                    font.pixelSize: 10 * virtualstudio.fontScale * virtualstudio.uiScale
+                    anchors.horizontalCenter: parent.horizontalCenter
+                    anchors.verticalCenter: parent.verticalCenter
+                    leftPadding: showAgainCheckbox.indicator.width + showAgainCheckbox.spacing
+                    color: textColour
+                }
             }
         }
     }
diff --git a/src/gui/close.svg b/src/gui/close.svg
new file mode 100644 (file)
index 0000000..e96a4e7
--- /dev/null
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"/></svg>
\ No newline at end of file
index 574dbdf88fcaed3298c05d33f2ddcb76ac2be012..8ef3a189b57feea70ccdb3870c6253c320fb05d6 100644 (file)
@@ -67,6 +67,7 @@
     <file>Prompt.svg</file>
     <file>network.svg</file>
     <file>video.svg</file>
+    <file>close.svg</file>
     <file>jacktrip.png</file>
     <file>jacktrip white.png</file>
     <file>JTOriginal.png</file>
index 9d0ed18e1be83cf6d641e119bbec504dbbd8b641..78c3fed14a55cc244231d92073cb60f084be6448 100644 (file)
@@ -845,6 +845,9 @@ void VirtualStudio::completeConnection()
         return;
     }
 
+    // always connect with audio device controls open
+    setCollapseDeviceControls(false);
+
     m_jackTripRunning = true;
     m_connectionState = QStringLiteral("Preparing audio...");
     emit connectionStateChanged();
@@ -879,6 +882,12 @@ void VirtualStudio::completeConnection()
         int buffer_strategy = m_audioConfigPtr->getBufferStrategy() + 1;
         // adjust buffer_strategy for PLC "auto" mode menu item
         if (buffer_strategy == 3) {
+            // run PLC without worker (4)
+            buffer_strategy = 4;
+            /*
+            // I don't believe this is still necessary,
+            // after splitting the input and output RtAudio streams
+            // See https://github.com/jacktrip/jacktrip/pull/1235
             if (useRtAudio) {
                 // if same device for input and output,
                 // run PLC without worker (4)
@@ -890,6 +899,7 @@ void VirtualStudio::completeConnection()
                 // run PLC without worker (4)
                 buffer_strategy = 4;
             }
+            */
         } else if (buffer_strategy == 5) {
             buffer_strategy = 3;  // run PLC with worker (3)
         }
@@ -1136,7 +1146,14 @@ void VirtualStudio::handleDeeplinkRequest(const QUrl& link)
         return;
     }
 
-    // special case if on create_studio screen
+    // special case if on create_studio screen:
+    // note that the studio creation happens inside of the web view,
+    // and the app doesn't really know anything about it. we depend
+    // on the web app triggering a deep link join event, which is
+    // handled here. it's unlikely that the new studio has been
+    // noticed yet, so we don't join right away; otherwise we'd just
+    // get an unknown studio error. instead, we trigger a refresh and
+    // rely on it to kick off the join afterwards.
     if (m_windowState == "create_studio") {
         refreshStudios(0, true);
         if (showDeviceSetup()) {
@@ -1148,16 +1165,15 @@ void VirtualStudio::handleDeeplinkRequest(const QUrl& link)
         return;
     }
 
-    // special case if on browsing screen
-    if (m_windowState == "browse") {
-        setWindowState("connected");
-        joinStudio();
-        return;
-    }
-
-    if (m_windowState == "failed") {
-        setWindowState("connected");
-        joinStudio();
+    // special case if on browsing and failed screens
+    if (m_windowState == "browse" || m_windowState == "failed") {
+        if (showDeviceSetup()) {
+            setWindowState("setup");
+            m_audioConfigPtr->startAudio();
+        } else {
+            setWindowState("connected");
+            joinStudio();
+        }
         return;
     }
 
@@ -1309,8 +1325,9 @@ void VirtualStudio::processError(const QString& errorMessage)
     } else if (errorMessage.startsWith(RtAudioErrorMsg)) {
         if (errorMessage.length() > RtAudioErrorMsg.length() + 2) {
             const QString details(errorMessage.sliced(RtAudioErrorMsg.length() + 2));
-            if (details.startsWith(
-                    QStringLiteral("RtApiCore: the stream device was disconnected"))) {
+            if (details.contains(QStringLiteral("device was disconnected"))
+                || details.contains(
+                    QStringLiteral("Unable to retrieve capture buffer"))) {
                 msgBox.setText(QStringLiteral("Your audio interface was disconnected."));
             } else {
                 msgBox.setText(details);
@@ -1439,17 +1456,18 @@ void VirtualStudio::resetState()
 
 void VirtualStudio::getServerList(bool signalRefresh, int index)
 {
+    // only allow one thread to refresh at a time
     QMutexLocker refreshLock(&m_refreshMutex);
     if (m_refreshInProgress)
         return;
     m_refreshInProgress = true;
+    refreshLock.unlock();
 
     // Get the serverId of the server at the top of our screen if we know it
     QString topServerId;
-    if (index >= 0 && index < m_servers.count()) {
-        topServerId = m_servers.at(index)->id();
+    if (index >= 0 && index < m_serverModel.count()) {
+        topServerId = m_serverModel.at(index)->id();
     }
-    refreshLock.unlock();
 
     QNetworkReply* reply = m_api->getServers();
     connect(
@@ -1458,9 +1476,9 @@ void VirtualStudio::getServerList(bool signalRefresh, int index)
                 if (signalRefresh) {
                     emit refreshFinished(index);
                 }
-                std::cout << "Error: " << reply->errorString().toStdString() << std::endl;
+                std::cerr << "Error: " << reply->errorString().toStdString() << std::endl;
                 reply->deleteLater();
-                QMutexLocker getServersLock(&m_refreshMutex);
+                QMutexLocker refreshLock(&m_refreshMutex);
                 m_refreshInProgress = false;
                 return;
             }
@@ -1472,10 +1490,8 @@ void VirtualStudio::getServerList(bool signalRefresh, int index)
                 if (signalRefresh) {
                     emit refreshFinished(index);
                 }
-                std::cout << "Error: Not an array" << std::endl;
-                QMutexLocker locker(&m_refreshMutex);
-                m_refreshInProgress = false;
-                QMutexLocker getServersLock(&m_refreshMutex);
+                std::cerr << "Error: Not an array" << std::endl;
+                QMutexLocker refreshLock(&m_refreshMutex);
                 m_refreshInProgress = false;
                 return;
             }
@@ -1487,75 +1503,75 @@ void VirtualStudio::getServerList(bool signalRefresh, int index)
             QVector<VsServerInfoPointer> pubServers;
             int skippedStudios = 0;
 
+            QMutexLocker refreshLock(&m_refreshMutex);  // protect m_servers
+            m_servers.clear();
             for (int i = 0; i < servers.count(); i++) {
                 if (servers.at(i)[QStringLiteral("type")].toString().contains(
                         QStringLiteral("JackTrip"))) {
                     QSharedPointer<VsServerInfo> serverInfo(new VsServerInfo(this));
                     serverInfo->setIsAdmin(
                         servers.at(i)[QStringLiteral("admin")].toBool());
-                    QString status = servers.at(i)[QStringLiteral("status")].toString();
-                    bool activeStudio = status == QLatin1String("Ready");
-                    bool hostedStudio = servers.at(i)[QStringLiteral("managed")].toBool();
-                    // Only iterate through servers that we want to show
-                    if (!m_showSelfHosted && !hostedStudio) {
-                        if (activeStudio || (serverInfo->isAdmin())) {
-                            skippedStudios++;
-                        }
-                        continue;
-                    }
-                    if (!m_showInactive && !activeStudio) {
-                        if (serverInfo->isAdmin()) {
-                            skippedStudios++;
-                        }
-                        continue;
-                    }
-                    if (activeStudio || m_showInactive) {
-                        serverInfo->setName(
-                            servers.at(i)[QStringLiteral("name")].toString());
-                        serverInfo->setHost(
-                            servers.at(i)[QStringLiteral("serverHost")].toString());
-                        serverInfo->setIsManaged(
-                            servers.at(i)[QStringLiteral("managed")].toBool());
-                        serverInfo->setStatus(
-                            servers.at(i)[QStringLiteral("status")].toString());
-                        serverInfo->setPort(
-                            servers.at(i)[QStringLiteral("serverPort")].toInt());
-                        serverInfo->setIsPublic(
-                            servers.at(i)[QStringLiteral("public")].toBool());
-                        serverInfo->setRegion(
-                            servers.at(i)[QStringLiteral("region")].toString());
-                        serverInfo->setPeriod(
-                            servers.at(i)[QStringLiteral("period")].toInt());
-                        serverInfo->setSampleRate(
-                            servers.at(i)[QStringLiteral("sampleRate")].toInt());
-                        serverInfo->setQueueBuffer(
-                            servers.at(i)[QStringLiteral("queueBuffer")].toInt());
-                        serverInfo->setBannerURL(
-                            servers.at(i)[QStringLiteral("bannerURL")].toString());
-                        serverInfo->setId(servers.at(i)[QStringLiteral("id")].toString());
-                        serverInfo->setSessionId(
-                            servers.at(i)[QStringLiteral("sessionId")].toString());
-                        serverInfo->setInviteKey(
-                            servers.at(i)[QStringLiteral("inviteKey")].toString());
-                        serverInfo->setCloudId(
-                            servers.at(i)[QStringLiteral("cloudId")].toString());
-                        serverInfo->setEnabled(
-                            servers.at(i)[QStringLiteral("enabled")].toBool());
-                        serverInfo->setIsOwner(
-                            servers.at(i)[QStringLiteral("owner")].toBool());
-                        if (servers.at(i)[QStringLiteral("owner")].toBool()) {
+                    serverInfo->setName(servers.at(i)[QStringLiteral("name")].toString());
+                    serverInfo->setHost(
+                        servers.at(i)[QStringLiteral("serverHost")].toString());
+                    serverInfo->setIsManaged(
+                        servers.at(i)[QStringLiteral("managed")].toBool());
+                    serverInfo->setStatus(
+                        servers.at(i)[QStringLiteral("status")].toString());
+                    serverInfo->setPort(
+                        servers.at(i)[QStringLiteral("serverPort")].toInt());
+                    serverInfo->setIsPublic(
+                        servers.at(i)[QStringLiteral("public")].toBool());
+                    serverInfo->setRegion(
+                        servers.at(i)[QStringLiteral("region")].toString());
+                    serverInfo->setPeriod(
+                        servers.at(i)[QStringLiteral("period")].toInt());
+                    serverInfo->setSampleRate(
+                        servers.at(i)[QStringLiteral("sampleRate")].toInt());
+                    serverInfo->setQueueBuffer(
+                        servers.at(i)[QStringLiteral("queueBuffer")].toInt());
+                    serverInfo->setBannerURL(
+                        servers.at(i)[QStringLiteral("bannerURL")].toString());
+                    serverInfo->setId(servers.at(i)[QStringLiteral("id")].toString());
+                    serverInfo->setSessionId(
+                        servers.at(i)[QStringLiteral("sessionId")].toString());
+                    serverInfo->setInviteKey(
+                        servers.at(i)[QStringLiteral("inviteKey")].toString());
+                    serverInfo->setCloudId(
+                        servers.at(i)[QStringLiteral("cloudId")].toString());
+                    serverInfo->setEnabled(
+                        servers.at(i)[QStringLiteral("enabled")].toBool());
+                    serverInfo->setIsOwner(
+                        servers.at(i)[QStringLiteral("owner")].toBool());
+
+                    // Always add servers to m_servers
+                    m_servers.append(serverInfo);
+
+                    // Only add servers to the model that we want to show
+                    if (serverInfo->isAdmin() || serverInfo->isOwner()) {
+                        if (filterStudio(*serverInfo)) {
+                            ++skippedStudios;
+                        } else {
                             yourServers.append(serverInfo);
                             serverInfo->setSection(VsServerInfo::YOUR_STUDIOS);
-                        } else if (m_subscribedServers.contains(serverInfo->id())) {
+                        }
+                    } else if (m_subscribedServers.contains(serverInfo->id())) {
+                        if (filterStudio(*serverInfo)) {
+                            ++skippedStudios;
+                        } else {
                             subServers.append(serverInfo);
                             serverInfo->setSection(VsServerInfo::SUBSCRIBED_STUDIOS);
-                        } else {
+                        }
+                    } else {
+                        if (!filterStudio(*serverInfo)) {
                             pubServers.append(serverInfo);
                             serverInfo->setSection(VsServerInfo::PUBLIC_STUDIOS);
                         }
+                        // don't count public studios in skipped count
                     }
                 }
             }
+            refreshLock.unlock();
 
             // sort studios in each section by name
             auto serverSorter = [](VsServerInfoPointer first,
@@ -1572,37 +1588,36 @@ void VirtualStudio::getServerList(bool signalRefresh, int index)
                 if (subServers.isEmpty()) {
                     m_logoSection = QStringLiteral("Public Studios");
 
-                    if (pubServers.isEmpty() && skippedStudios == 0) {
+                    if (skippedStudios == 0) {
                         // This is a new user
                         setShowCreateStudio(true);
                     } else {
-                        // This is not a new user.
-                        // Set to false in case the studio created since refreshing.
+                        // This is not a new user. One or more studios were filtered.
                         setShowCreateStudio(false);
                     }
                 } else {
                     m_logoSection = QStringLiteral("Subscribed Studios");
                 }
-                emit logoSectionChanged();
             } else {
                 m_logoSection = QStringLiteral("Your Studios");
-                emit logoSectionChanged();
             }
+            emit logoSectionChanged();
 
-            QMutexLocker getServersLock(&m_refreshMutex);
-            m_servers.clear();
-            m_servers.append(yourServers);
-            m_servers.append(subServers);
-            m_servers.append(pubServers);
             m_serverModel.clear();
-            for (const VsServerInfoPointer& s : m_servers) {
+            for (const VsServerInfoPointer& s : yourServers) {
+                m_serverModel.append(s.get());
+            }
+            for (const VsServerInfoPointer& s : subServers) {
+                m_serverModel.append(s.get());
+            }
+            for (const VsServerInfoPointer& s : pubServers) {
                 m_serverModel.append(s.get());
             }
             emit serverModelChanged();
             int index = -1;
             if (!topServerId.isEmpty()) {
-                for (int i = 0; i < m_servers.count(); i++) {
-                    if (m_servers.at(i)->id() == topServerId) {
+                for (int i = 0; i < m_serverModel.count(); i++) {
+                    if (m_serverModel.at(i)->id() == topServerId) {
                         index = i;
                         break;
                     }
@@ -1622,6 +1637,20 @@ void VirtualStudio::getServerList(bool signalRefresh, int index)
         });
 }
 
+bool VirtualStudio::filterStudio(const VsServerInfo& serverInfo) const
+{
+    // Return true if we want to filter the studio out of the display model
+    bool activeStudio = serverInfo.status() == QLatin1String("Ready");
+    bool hostedStudio = serverInfo.isManaged();
+    if (!m_showSelfHosted && !hostedStudio) {
+        return true;
+    }
+    if (!m_showInactive && !activeStudio) {
+        return true;
+    }
+    return false;
+}
+
 void VirtualStudio::getSubscriptions()
 {
     if (m_userId.isEmpty()) {
index 036b80857fe2e1e7b015c2c4b4c2b60f448f62ab..d665166dd4a27ca5489346ce577dc5d1b13c44c4 100644 (file)
@@ -260,6 +260,7 @@ class VirtualStudio : public QObject
    private:
     void resetState();
     void getServerList(bool signalRefresh = false, int index = -1);
+    bool filterStudio(const VsServerInfo& serverInfo) const;
     void getSubscriptions();
     void getRegions();
     void getUserMetadata();
index 96b584e46339d5f3e7cfb4c0b63946bd4a635783..0244771ab572278ac7dd5888823d0e826f0c887c 100644 (file)
@@ -798,10 +798,6 @@ AudioInterface* VsAudio::newAudioInterface(JackTrip* jackTripPtr)
 {
     AudioInterface* ifPtr = nullptr;
 
-#if defined(__unix__)
-    AudioInterface::setPipewireLatency(getBufferSize(), m_sampleRate);
-#endif
-
     // Create AudioInterface Client Object
     if (isBackendAvailable<AudioInterfaceMode::ALL>() && jackIsAvailable()) {
         // all backends area available
@@ -851,7 +847,7 @@ AudioInterface* VsAudio::newAudioInterface(JackTrip* jackTripPtr)
         setBufferSize(ifPtr->getBufferSizeInSamples());
     }
 
-    std::cout << "The Sampling Rate is: " << m_sampleRate << std::endl;
+    std::cout << "The Sampling Rate is: " << ifPtr->getSampleRate() << std::endl;
     std::cout << gPrintSeparator << std::endl;
     int AudioBufferSizeInBytes = ifPtr->getBufferSizeInSamples() * sizeof(sample_t);
     std::cout << "The Audio Buffer Size is: " << ifPtr->getBufferSizeInSamples()
@@ -892,9 +888,12 @@ AudioInterface* VsAudio::newJackAudioInterface([[maybe_unused]] JackTrip* jackTr
         ifPtr = new JackAudioInterface(inputChans, outputChans, m_audioBitResolution,
                                        jackTripPtr != nullptr, jackTripPtr);
         ifPtr->setClientName(QStringLiteral("JackTrip"));
+#if defined(__unix__)
+        AudioInterface::setPipewireLatency(
+            getBufferSize(),
+            jackTripPtr == nullptr ? 44100 : jackTripPtr->getSampleRate());
+#endif
         ifPtr->setup(true);
-
-        m_sampleRate = ifPtr->getSampleRate();
     }
 #endif
     return ifPtr;
@@ -920,7 +919,7 @@ AudioInterface* VsAudio::newRtAudioInterface([[maybe_unused]] JackTrip* jackTrip
         inputChans, outputChans,
         static_cast<AudioInterface::inputMixModeT>(getInputMixMode()),
         m_audioBitResolution, jackTripPtr != nullptr, jackTripPtr);
-    ifPtr->setSampleRate(m_sampleRate);
+    ifPtr->setSampleRate(jackTripPtr == nullptr ? 44100 : jackTripPtr->getSampleRate());
     ifPtr->setInputDevice(getInputDevice().toStdString());
     ifPtr->setOutputDevice(getOutputDevice().toStdString());
     ifPtr->setBufferSizeInSamples(getBufferSize());
@@ -929,6 +928,10 @@ AudioInterface* VsAudio::newRtAudioInterface([[maybe_unused]] JackTrip* jackTrip
     if (!devices.empty())
         static_cast<RtAudioInterface*>(ifPtr)->setRtAudioDevices(devices);
 
+#if defined(__unix__)
+    AudioInterface::setPipewireLatency(getBufferSize(), ifPtr->getSampleRate());
+#endif
+
     // Note: setup might change the number of channels and/or buffer size
     ifPtr->setup(true);
 
index 298d2115becb65203a38677d2c72499658e7cbf6..e6e68ddd0046af097a751a68603ca9d7389663d3 100644 (file)
@@ -343,7 +343,6 @@ class VsAudio : public QObject
     float m_inMultiplier    = 1.0;
     float m_outMultiplier   = 1.0;
     float m_monMultiplier   = 0;
-    uint32_t m_sampleRate   = gDefaultSampleRate;
 
     QString m_inputDevice;
     QString m_outputDevice;
index 31e1ad930c1f5a982477a4aea40a2513dfbf091d..99d1443cc5b9a7c9099bb32a0c06ab619d4346cd 100644 (file)
@@ -293,12 +293,12 @@ JackTrip* VsDevice::initJackTrip(
 #ifdef RT_AUDIO
     if (useRtAudio) {
         m_jackTrip->setAudiointerfaceMode(JackTrip::RTAUDIO);
-        m_jackTrip->setSampleRate(studioInfo->sampleRate());
         m_jackTrip->setAudioBufferSizeInSamples(bufferSize);
         m_jackTrip->setInputDevice(input);
         m_jackTrip->setOutputDevice(output);
     }
 #endif
+    m_jackTrip->setSampleRate(studioInfo->sampleRate());
     int bindPort = selectBindPort();
     if (bindPort == 0) {
         return 0;
index 199a5c7236d90098a68299a912887b9250275e93..bebe0988d3668169370e343b10383b94012ba545 100644 (file)
@@ -40,7 +40,7 @@
 
 #include "AudioInterface.h"
 
-constexpr const char* const gVersion = "2.1.0";  ///< JackTrip version
+constexpr const char* const gVersion = "2.2.0";  ///< JackTrip version
 
 //*******************************************************************************
 /// \name Default Values
diff --git a/subprojects/packagefiles/rtaudio-remove-input-disconnect-listener.patch b/subprojects/packagefiles/rtaudio-remove-input-disconnect-listener.patch
new file mode 100644 (file)
index 0000000..8809fd4
--- /dev/null
@@ -0,0 +1,11 @@
+--- a/RtAudio.cpp      2024-01-11 13:04:29.148565300 -0800
++++ b/RtAudio.cpp      2024-01-11 13:04:42.305228600 -0800
+@@ -1981,7 +1981,7 @@
+         }
+       }
+-      if ( handle->disconnectListenerAdded[0] ) {
++      if ( handle->disconnectListenerAdded[1] ) {
+         property.mSelector = kAudioDevicePropertyDeviceIsAlive;
+         if (AudioObjectRemovePropertyListener( handle->id[1], &property, streamDisconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
+           errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
index a57365a9abd8a2a0a2bad4f1e1b2a60290e65e43..68eadc60ac2a1f88e2c265943fcc0f8fc8905315 100644 (file)
@@ -3,6 +3,7 @@ directory = rtaudio-6.0.1
 source_url = https://github.com/thestk/rtaudio/archive/refs/tags/6.0.1.tar.gz
 source_filename = 6.0.1.tar.gz
 source_hash = 7206c8b6cee43b474f43d64988fefaadfdcfc4264ed38d8de5f5d0e6ddb0a123
+diff_files = rtaudio-remove-input-disconnect-listener.patch
 
 [provide]
 dependency_names = rtaudio