Diffstat (limited to 'examples')
-rw-r--r--  examples/graphs/2d/graphsaudio/GraphsAudio/Main.qml  |  50
-rw-r--r--  examples/graphs/2d/graphsaudio/GraphsAudio/qmldir    |   2
-rw-r--r--  examples/graphs/2d/graphsaudio/doc/graphsaudio.rst   |   8
-rw-r--r--  examples/graphs/2d/graphsaudio/doc/graphsaudio.webp  | bin 0 -> 12908 bytes
-rw-r--r--  examples/graphs/2d/graphsaudio/graphsaudio.pyproject |   3
-rw-r--r--  examples/graphs/2d/graphsaudio/main.py               |  81
6 files changed, 144 insertions(+), 0 deletions(-)
diff --git a/examples/graphs/2d/graphsaudio/GraphsAudio/Main.qml b/examples/graphs/2d/graphsaudio/GraphsAudio/Main.qml
new file mode 100644
index 000000000..51bf3ef12
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/GraphsAudio/Main.qml
@@ -0,0 +1,50 @@
+// Copyright (C) 2025 The Qt Company Ltd.
+// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR BSD-3-Clause
+
+import QtQuick
+import QtQuick.Controls
+import QtGraphs
+
+ApplicationWindow {
+    visible: true
+    width: 1000
+    height: 800
+    title: "Data from the microphone (" + device_name + ")"
+
+    GraphsView {
+        id: graph
+        anchors.fill: parent
+
+        LineSeries {
+            id: audio_series
+            width: 2
+            color: "#007acc"
+        }
+
+        axisX: ValueAxis {
+            min: 0
+            max: 2000
+            tickInterval: 500
+            labelFormat: "%g"
+            titleText: "Samples"
+        }
+
+        axisY: ValueAxis {
+            min: -1
+            max: 1
+            tickInterval: 0.5
+            labelFormat: "%0.1f"
+            titleText: "Audio level"
+        }
+    }
+
+    Connections {
+        target: audio_bridge
+        function onDataUpdated(buffer) {
+            audio_series.clear()
+            for (let i = 0; i < buffer.length; ++i) {
+                audio_series.append(buffer[i])
+            }
+        }
+    }
+}
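The QML above references two names it does not define: device_name in the window title and audio_bridge as the Connections target. Both are injected from Python as root-context properties, and the bridge's dataUpdated signal surfaces in QML as the onDataUpdated(buffer) handler. A minimal sketch of that wiring, with Bridge as a stand-in for the Audio class that main.py (below) actually exposes:

    import sys
    from PySide6.QtCore import QObject, Signal
    from PySide6.QtGui import QGuiApplication
    from PySide6.QtQml import QQmlApplicationEngine

    app = QGuiApplication(sys.argv)
    engine = QQmlApplicationEngine()

    class Bridge(QObject):
        # A list payload (here, of QPointF) arrives in QML as onDataUpdated(buffer).
        dataUpdated = Signal(list)

    # Keep a Python reference so the bridge is not garbage-collected.
    bridge = Bridge()
    engine.rootContext().setContextProperty("audio_bridge", bridge)
    engine.rootContext().setContextProperty("device_name", "stand-in device")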
diff --git a/examples/graphs/2d/graphsaudio/GraphsAudio/qmldir b/examples/graphs/2d/graphsaudio/GraphsAudio/qmldir
new file mode 100644
index 000000000..cc5408a66
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/GraphsAudio/qmldir
@@ -0,0 +1,2 @@
+module GraphsAudio
+Main 1.0 Main.qml
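The two-line qmldir is what makes the GraphsAudio directory a QML module: "module GraphsAudio" names it, and "Main 1.0 Main.qml" registers Main.qml as the QML type Main. The engine can then resolve the type by name, provided the directory containing GraphsAudio/ is on the QML import path; this is the loading pattern main.py uses at the bottom of this change:

    import sys
    from pathlib import Path
    from PySide6.QtGui import QGuiApplication
    from PySide6.QtQml import QQmlApplicationEngine

    app = QGuiApplication(sys.argv)
    engine = QQmlApplicationEngine()
    # The parent of GraphsAudio/ goes on the import path; the module and
    # type names then match the qmldir entries.
    engine.addImportPath(Path(__file__).parent)
    engine.loadFromModule("GraphsAudio", "Main")
    sys.exit(app.exec())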
diff --git a/examples/graphs/2d/graphsaudio/doc/graphsaudio.rst b/examples/graphs/2d/graphsaudio/doc/graphsaudio.rst
new file mode 100644
index 000000000..f19b28caf
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/doc/graphsaudio.rst
@@ -0,0 +1,8 @@
+GraphsAudio Example
+===================
+
+This example shows how to plot dynamic data (microphone input) with QtGraphs and QML.
+
+.. image:: graphsaudio.webp
+ :width: 400
+ :alt: GraphsAudio Screenshot
diff --git a/examples/graphs/2d/graphsaudio/doc/graphsaudio.webp b/examples/graphs/2d/graphsaudio/doc/graphsaudio.webp
new file mode 100644
index 000000000..bb57b18e5
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/doc/graphsaudio.webp
Binary files differ
diff --git a/examples/graphs/2d/graphsaudio/graphsaudio.pyproject b/examples/graphs/2d/graphsaudio/graphsaudio.pyproject
new file mode 100644
index 000000000..eff791919
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/graphsaudio.pyproject
@@ -0,0 +1,3 @@
+{
+ "files": ["main.py", "GraphsAudio/Main.qml", "GraphsAudio/qmldir"]
+}
diff --git a/examples/graphs/2d/graphsaudio/main.py b/examples/graphs/2d/graphsaudio/main.py
new file mode 100644
index 000000000..239aee036
--- /dev/null
+++ b/examples/graphs/2d/graphsaudio/main.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2025 The Qt Company Ltd.
+# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR BSD-3-Clause
+from __future__ import annotations
+
+import sys
+from pathlib import Path
+from PySide6.QtCore import QObject, QPointF, Signal, Slot
+from PySide6.QtMultimedia import QAudioFormat, QAudioSource, QMediaDevices
+from PySide6.QtQml import QQmlApplicationEngine
+from PySide6.QtGui import QGuiApplication
+
+
+SAMPLE_COUNT = 2000
+RESOLUTION = 4
+
+
+class Audio(QObject):
+    dataUpdated = Signal(list)
+
+    def __init__(self, device):
+        super().__init__()
+
+        format_audio = QAudioFormat()
+        format_audio.setSampleRate(8000)
+        format_audio.setChannelCount(1)
+        format_audio.setSampleFormat(QAudioFormat.UInt8)
+
+        self.device_name = device.description()
+
+        self._audio_input = QAudioSource(device, format_audio, self)
+        self._io_device = self._audio_input.start()
+        self._io_device.readyRead.connect(self._readyRead)
+
+        self._buffer = [QPointF(x, 0) for x in range(SAMPLE_COUNT)]
+
+    @Slot()
+    def stop(self):
+        if self._audio_input is not None:
+            self._audio_input.stop()
+
+    @Slot()
+    def _readyRead(self):
+        data = self._io_device.readAll()
+        # One sample is kept for every RESOLUTION bytes read from the device.
+        available_samples = data.size() // RESOLUTION
+        start = 0
+        if available_samples < SAMPLE_COUNT:
+            # Shift the old samples left to make room for the new ones.
+            start = SAMPLE_COUNT - available_samples
+            for s in range(start):
+                self._buffer[s].setY(self._buffer[s + available_samples].y())
+
+        data_index = 0
+        for s in range(start, SAMPLE_COUNT):
+            # Map the unsigned 8-bit sample (silence = 128) to the -1..1 range.
+            value = (ord(data[data_index]) - 128) / 128
+            self._buffer[s].setY(value)
+            data_index += RESOLUTION
+
+        self.dataUpdated.emit(self._buffer)
+
+
+if __name__ == '__main__':
+    app = QGuiApplication(sys.argv)
+    engine = QQmlApplicationEngine()
+
+    input_devices = QMediaDevices.audioInputs()
+    if not input_devices:
+        print("There is no audio input device available.", file=sys.stderr)
+        sys.exit(-1)
+
+    device = input_devices[0]
+    audio_bridge = Audio(device)
+    engine.rootContext().setContextProperty("audio_bridge", audio_bridge)
+    engine.rootContext().setContextProperty("device_name", device.description())
+    app.aboutToQuit.connect(audio_bridge.stop)
+
+    engine.addImportPath(Path(__file__).parent)
+    engine.loadFromModule("GraphsAudio", "Main")
+
+    sys.exit(app.exec())
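The buffer handling in _readyRead is the core of the example: the microphone delivers unsigned 8-bit samples in which 128 is silence, only every RESOLUTION-th byte is kept, and the plot always shows the newest SAMPLE_COUNT values, with older ones shifted left. The same scheme on plain Python lists, with shrunk constants and a hypothetical push helper, runnable without any audio hardware:

    # Standalone sketch of _readyRead's buffering; `push` is illustrative.
    SAMPLE_COUNT = 8   # shrunk from 2000 for the demonstration
    RESOLUTION = 4

    buffer = [0.0] * SAMPLE_COUNT

    def push(raw: bytes) -> None:
        # One sample per RESOLUTION bytes, mapped so 0 -> -1.0, 128 -> 0.0, 255 -> ~0.99.
        samples = [(b - 128) / 128 for b in raw[::RESOLUTION]]
        keep = max(SAMPLE_COUNT - len(samples), 0)
        buffer[:keep] = buffer[SAMPLE_COUNT - keep:]   # shift old samples left
        buffer[keep:] = samples[-SAMPLE_COUNT:]        # newest samples on the right

    push(bytes(range(0, 256, 16)))   # 16 raw bytes -> 4 samples
    print(buffer)                    # [0.0, 0.0, 0.0, 0.0, -1.0, -0.5, 0.0, 0.5]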