/****************************************************************************
**
** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation (qt-info@nokia.com)
**
** This file is part of the examples of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:BSD$
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
** the names of its contributors may be used to endorse or promote
** products derived from this software without specific prior written
** permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
** $QT_END_LICENSE$
**
****************************************************************************/

#include "engine.h"
#include "tonegenerator.h"
#include "utils.h"

#include <math.h>

#include <QCoreApplication>
#include <QMetaObject>
#include <QSet>
#include <QtMultimedia/QAudioInput>
#include <QtMultimedia/QAudioOutput>
#include <QDebug>
#include <QThread>
#include <QFile>

//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------

// Length of the internal audio buffer, in microseconds
const qint64 BufferDurationUs = 10 * 1000000;

// Interval between notify() signals emitted by QAudioInput / QAudioOutput,
// in milliseconds
const int NotifyIntervalMs = 100;

// Size of the level calculation window, in microseconds
const int LevelWindowUs = 0.1 * 1000000;
//-----------------------------------------------------------------------------
// Helper functions
//-----------------------------------------------------------------------------

QDebug& operator<<(QDebug &debug, const QAudioFormat &format)
{
debug << format.frequency() << "Hz" << format.channels() << "channels";
return debug;
}
//-----------------------------------------------------------------------------
// Constructor and destructor
//-----------------------------------------------------------------------------

Engine::Engine(QObject *parent)
: QObject(parent)
, m_mode(QAudio::AudioInput)
, m_state(QAudio::StoppedState)
, m_generateTone(false)
, m_file(0)
, m_analysisFile(0)
, m_availableAudioInputDevices
(QAudioDeviceInfo::availableDevices(QAudio::AudioInput))
, m_audioInputDevice(QAudioDeviceInfo::defaultInputDevice())
, m_audioInput(0)
, m_audioInputIODevice(0)
, m_recordPosition(0)
, m_availableAudioOutputDevices
(QAudioDeviceInfo::availableDevices(QAudio::AudioOutput))
, m_audioOutputDevice(QAudioDeviceInfo::defaultOutputDevice())
, m_audioOutput(0)
, m_playPosition(0)
, m_bufferPosition(0)
, m_bufferLength(0)
, m_dataLength(0)
, m_levelBufferLength(0)
, m_rmsLevel(0.0)
, m_peakLevel(0.0)
, m_spectrumBufferLength(0)
, m_spectrumAnalyser()
, m_spectrumPosition(0)
, m_count(0)
{
qRegisterMetaType<FrequencySpectrum>("FrequencySpectrum");
qRegisterMetaType<WindowFunction>("WindowFunction");
CHECKED_CONNECT(&m_spectrumAnalyser, SIGNAL(spectrumChanged(FrequencySpectrum)),
this, SLOT(spectrumChanged(FrequencySpectrum)));
initialize();
#ifdef DUMP_DATA
createOutputDir();
#endif

#ifdef DUMP_SPECTRUM
m_spectrumAnalyser.setOutputPath(outputPath());
#endif
}
Engine::~Engine()
{
}
//-----------------------------------------------------------------------------
// Public functions
//-----------------------------------------------------------------------------

bool Engine::loadFile(const QString &fileName)
{
reset();
bool result =false;
Q_ASSERT(!m_generateTone);
Q_ASSERT(!m_file);
Q_ASSERT(!fileName.isEmpty());
m_file =new WavFile(this);
if (m_file->open(fileName)) {
if (isPCMS16LE(m_file->fileFormat())) {
result = initialize();
} else {
emit errorMessage(tr("Audio format not supported"),
formatToString(m_file->fileFormat()));
}
} else {
emit errorMessage(tr("Could not open file"), fileName);
}
if (result) {
m_analysisFile =new WavFile(this);
m_analysisFile->open(fileName);
}
return result;
}
bool Engine::generateTone(const Tone &tone)
{
reset();
Q_ASSERT(!m_generateTone);
Q_ASSERT(!m_file);
m_generateTone =true;
m_tone = tone;
ENGINE_DEBUG <<"Engine::generateTone"<<"startFreq"<< m_tone.startFreq
<<"endFreq"<< m_tone.endFreq
<<"amp"<< m_tone.amplitude;
return initialize();
}
bool Engine::generateSweptTone(qreal amplitude)
{
Q_ASSERT(!m_generateTone);
Q_ASSERT(!m_file);
m_generateTone =true;
m_tone.startFreq =1;
m_tone.endFreq =0;
m_tone.amplitude = amplitude;
ENGINE_DEBUG <<"Engine::generateSweptTone"<<"startFreq"<< m_tone.startFreq
<<"amp"<< m_tone.amplitude;
return initialize();
}
bool Engine::initializeRecord()
{
reset();
ENGINE_DEBUG <<"Engine::initializeRecord";
Q_ASSERT(!m_generateTone);
Q_ASSERT(!m_file);
m_generateTone =false;
m_tone = SweptTone();
return initialize();
}
qint64 Engine::bufferLength() const
{
return m_file ? m_file->size() : m_bufferLength;
}
void Engine::setWindowFunction(WindowFunction type)
{
m_spectrumAnalyser.setWindowFunction(type);
}
//-----------------------------------------------------------------------------
// Public slots
//-----------------------------------------------------------------------------

void Engine::startRecording()
{
if (m_audioInput) {
if (QAudio::AudioInput == m_mode && QAudio::SuspendedState == m_state) {
m_audioInput->resume();
} else {
m_spectrumAnalyser.cancelCalculation();
spectrumChanged(0, 0, FrequencySpectrum());
m_buffer.fill(0);
setRecordPosition(0, true);
stopPlayback();
m_mode = QAudio::AudioInput;
CHECKED_CONNECT(m_audioInput, SIGNAL(stateChanged(QAudio::State)),
this, SLOT(audioStateChanged(QAudio::State)));
CHECKED_CONNECT(m_audioInput, SIGNAL(notify()),
this, SLOT(audioNotify()));
m_count = 0;
m_dataLength = 0;
emit dataLengthChanged(0);
m_audioInputIODevice = m_audioInput->start();
CHECKED_CONNECT(m_audioInputIODevice, SIGNAL(readyRead()),
this, SLOT(audioDataReady()));
}
}
}
void Engine::startPlayback()
{
if (m_audioOutput) {
if (QAudio::AudioOutput == m_mode &&QAudio::SuspendedState == m_state) {
#ifdef Q_OS_WIN
// The Windows backend seems to internally go back into ActiveState
// while still returning SuspendedState, so to ensure that it doesn't
// ignore the resume() call, we first re-suspend
m_audioOutput->suspend();
#endif
m_audioOutput->resume();
} else {
m_spectrumAnalyser.cancelCalculation();
spectrumChanged(0, 0, FrequencySpectrum());
setPlayPosition(0, true);
stopRecording();
m_mode = QAudio::AudioOutput;
CHECKED_CONNECT(m_audioOutput, SIGNAL(stateChanged(QAudio::State)),
this, SLOT(audioStateChanged(QAudio::State)));
CHECKED_CONNECT(m_audioOutput, SIGNAL(notify()),
this, SLOT(audioNotify()));
m_count = 0;
if (m_file) {
m_file->seek(0);
m_bufferPosition = 0;
m_dataLength = 0;
m_audioOutput->start(m_file);
} else {
m_audioOutputIODevice.close();
m_audioOutputIODevice.setBuffer(&m_buffer);
m_audioOutputIODevice.open(QIODevice::ReadOnly);
m_audioOutput->start(&m_audioOutputIODevice);
}
}
}
}
void Engine::suspend()
{
if (QAudio::ActiveState == m_state ||QAudio::IdleState == m_state) {
switch (m_mode) {
case QAudio::AudioInput:
m_audioInput->suspend();
break;
case QAudio::AudioOutput:
m_audioOutput->suspend();
break;
}
}
}
void Engine::setAudioInputDevice(const QAudioDeviceInfo &device)
{
if (device.deviceName() != m_audioInputDevice.deviceName()) {
m_audioInputDevice = device;
initialize();
}
}
void Engine::setAudioOutputDevice(const QAudioDeviceInfo &device)
{
if (device.deviceName() != m_audioOutputDevice.deviceName()) {
m_audioOutputDevice = device;
initialize();
}
}
//-----------------------------------------------------------------------------
// Private slots
//-----------------------------------------------------------------------------

void Engine::audioNotify()
{
switch (m_mode) {
case QAudio::AudioInput: {
const qint64 recordPosition = qMin(m_bufferLength, audioLength(m_format, m_audioInput->processedUSecs()));
setRecordPosition(recordPosition);
const qint64 levelPosition = m_dataLength - m_levelBufferLength;
if (levelPosition >= 0)
calculateLevel(levelPosition, m_levelBufferLength);
if (m_dataLength >= m_spectrumBufferLength) {
const qint64 spectrumPosition = m_dataLength - m_spectrumBufferLength;
calculateSpectrum(spectrumPosition);
}
emit bufferChanged(0, m_dataLength, m_buffer);
}
break;
case QAudio::AudioOutput: {
const qint64 playPosition = audioLength(m_format, m_audioOutput->processedUSecs());
setPlayPosition(qMin(bufferLength(), playPosition));
const qint64 levelPosition = playPosition - m_levelBufferLength;
const qint64 spectrumPosition = playPosition - m_spectrumBufferLength;
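// When playing back from a WAV file, QAudioOutput streams directly from
// m_file, so the data covered by the level and spectrum windows may not yet
// be in m_buffer; in that case it is re-read from m_analysisFile below
// before the calculations run.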
if (m_file) {
if (levelPosition > m_bufferPosition ||
spectrumPosition > m_bufferPosition ||
qMax(m_levelBufferLength, m_spectrumBufferLength) > m_dataLength) {
m_bufferPosition = 0;
m_dataLength = 0;
// Data needs to be read into m_buffer in order to be analysed
const qint64 readPos = qMax(qint64(0), qMin(levelPosition, spectrumPosition));
const qint64 readEnd = qMin(m_analysisFile->size(), qMax(levelPosition + m_levelBufferLength, spectrumPosition + m_spectrumBufferLength));
const qint64 readLen = readEnd - readPos + audioLength(m_format, WaveformWindowDuration);
qDebug() <<"Engine::audioNotify [1]"<<"analysisFileSize"<< m_analysisFile->size()
<<"readPos"<< readPos
<<"readLen"<< readLen;
if (m_analysisFile->seek(readPos + m_analysisFile->headerLength())) {
m_buffer.resize(readLen);
m_bufferPosition = readPos;
m_dataLength = m_analysisFile->read(m_buffer.data(), readLen);
qDebug() <<"Engine::audioNotify [2]"<<"bufferPosition"<< m_bufferPosition <<"dataLength"<< m_dataLength;
} else {
qDebug() <<"Engine::audioNotify [2]"<<"file seek error";
}
emit bufferChanged(m_bufferPosition, m_dataLength, m_buffer);
}
} else {
if (playPosition >= m_dataLength)
stopPlayback();
}
if (levelPosition >= 0 && levelPosition + m_levelBufferLength < m_bufferPosition + m_dataLength)
calculateLevel(levelPosition, m_levelBufferLength);
if (spectrumPosition >= 0 && spectrumPosition + m_spectrumBufferLength < m_bufferPosition + m_dataLength)
calculateSpectrum(spectrumPosition);
}
break;
}
}
void Engine::audioStateChanged(QAudio::State state)
{
ENGINE_DEBUG <<"Engine::audioStateChanged from"<< m_state
<<"to"<< state;
if (QAudio::IdleState == state && m_file && m_file->pos() == m_file->size()) {
stopPlayback();
} else {
if (QAudio::StoppedState == state) {
// Check error
QAudio::Error error = QAudio::NoError;
switch (m_mode) {
case QAudio::AudioInput:
error = m_audioInput->error();
break;
case QAudio::AudioOutput:
error = m_audioOutput->error();
break;
}
if (QAudio::NoError != error) {
reset();
return;
}
}
setState(state);
}
}
void Engine::audioDataReady()
{
Q_ASSERT(0== m_bufferPosition);
const qint64 bytesReady = m_audioInput->bytesReady();
const qint64 bytesSpace = m_buffer.size() - m_dataLength;
const qint64 bytesToRead = qMin(bytesReady, bytesSpace);
const qint64 bytesRead = m_audioInputIODevice->read(
m_buffer.data() + m_dataLength,
bytesToRead);
if (bytesRead) {
m_dataLength += bytesRead;
emit dataLengthChanged(dataLength());
}
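// The capture buffer has a fixed size (BufferDurationUs worth of audio), so
// recording stops automatically once it is full.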
if (m_buffer.size() == m_dataLength)
stopRecording();
}
void Engine::spectrumChanged(const FrequencySpectrum &spectrum)
{
ENGINE_DEBUG <<"Engine::spectrumChanged"<<"pos"<< m_spectrumPosition;
emit spectrumChanged(m_spectrumPosition, m_spectrumBufferLength, spectrum);
}
//-----------------------------------------------------------------------------
// Private functions
//-----------------------------------------------------------------------------

void Engine::resetAudioDevices()
{
delete m_audioInput;
m_audioInput = 0;
m_audioInputIODevice = 0;
setRecordPosition(0);
delete m_audioOutput;
m_audioOutput = 0;
setPlayPosition(0);
m_spectrumPosition = 0;
setLevel(0.0, 0.0, 0);
}
void Engine::reset()
{
stopRecording();
stopPlayback();
setState(QAudio::AudioInput, QAudio::StoppedState);
setFormat(QAudioFormat());
m_generateTone = false;
delete m_file;
m_file = 0;
delete m_analysisFile;
m_analysisFile = 0;
m_buffer.clear();
m_bufferPosition = 0;
m_bufferLength = 0;
m_dataLength = 0;
emit dataLengthChanged(0);
resetAudioDevices();
}
bool Engine::initialize()
{
bool result =false;
QAudioFormat format = m_format;
if (selectFormat()) {
if (m_format != format) {
resetAudioDevices();
if (m_file) {
emit bufferLengthChanged(bufferLength());
emit dataLengthChanged(dataLength());
emit bufferChanged(0,0, m_buffer);
setRecordPosition(bufferLength());
result =true;
} else {
m_bufferLength = audioLength(m_format, BufferDurationUs);
m_buffer.resize(m_bufferLength);
m_buffer.fill(0);
emit bufferLengthChanged(bufferLength());
if (m_generateTone) {
if (0 == m_tone.endFreq) {
const qreal nyquist = nyquistFrequency(m_format);
m_tone.endFreq = qMin(qreal(SpectrumHighFreq), nyquist);
}
// Call function defined in utils.h, at global scope
::generateTone(m_tone, m_format, m_buffer);
m_dataLength = m_bufferLength;
emit dataLengthChanged(dataLength());
emit bufferChanged(0, m_dataLength, m_buffer);
setRecordPosition(m_bufferLength);
result =true;
} else {
emit bufferChanged(0, 0, m_buffer);
m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this);
m_audioInput->setNotifyInterval(NotifyIntervalMs);
result =true;
}
}
m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this);
m_audioOutput->setNotifyInterval(NotifyIntervalMs);
}
} else {
if (m_file)
emit errorMessage(tr("Audio format not supported"),
formatToString(m_format));
else if (m_generateTone)
emit errorMessage(tr("No suitable format found"), "");
else
emit errorMessage(tr("No common input / output format found"), "");
}
ENGINE_DEBUG <<"Engine::initialize"<<"m_bufferLength"<< m_bufferLength;
ENGINE_DEBUG <<"Engine::initialize"<<"m_dataLength"<< m_dataLength;
ENGINE_DEBUG <<"Engine::initialize"<<"format"<< m_format;
return result;
}
bool Engine::selectFormat()
{
bool foundSupportedFormat =false;
if (m_file ||QAudioFormat() != m_format) {
QAudioFormat format = m_format;
if (m_file)
// Header is read from the WAV file; just need to check whether
// it is supported by the audio output device
format = m_file->fileFormat();
if (m_audioOutputDevice.isFormatSupported(format)) {
setFormat(format);
foundSupportedFormat =true;
}
} else {
QList<int> frequenciesList;
#ifdef Q_OS_WIN
// The Windows audio backend does not correctly report format support
// (see QTBUG-9100). Furthermore, although the audio subsystem captures
// at 11025Hz, the resulting audio is corrupted.
frequenciesList += 8000;
#endif
if (!m_generateTone)
frequenciesList += m_audioInputDevice.supportedFrequencies();
frequenciesList += m_audioOutputDevice.supportedFrequencies();
frequenciesList = frequenciesList.toSet().toList(); // remove duplicates
qSort(frequenciesList);
ENGINE_DEBUG <<"Engine::initialize frequenciesList"<< frequenciesList;
QList<int> channelsList;
channelsList += m_audioInputDevice.supportedChannels();
channelsList += m_audioOutputDevice.supportedChannels();
channelsList = channelsList.toSet().toList();
qSort(channelsList);
ENGINE_DEBUG <<"Engine::initialize channelsList"<< channelsList;
QAudioFormat format;
format.setByteOrder(QAudioFormat::LittleEndian);
format.setCodec("audio/pcm");
format.setSampleSize(16);
format.setSampleType(QAudioFormat::SignedInt);
int frequency, channels;
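// Try each candidate sample rate against each candidate channel count, and
// keep the first combination that the output device supports (and the input
// device too, unless the engine is generating the tone itself).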
foreach (frequency, frequenciesList) {
if (foundSupportedFormat)
break;
format.setFrequency(frequency);
foreach (channels, channelsList) {
format.setChannels(channels);
const bool inputSupport = m_generateTone ||
m_audioInputDevice.isFormatSupported(format);
const bool outputSupport = m_audioOutputDevice.isFormatSupported(format);
ENGINE_DEBUG <<"Engine::initialize checking "<< format
<<"input"<< inputSupport
<<"output"<< outputSupport;
if (inputSupport && outputSupport) {
foundSupportedFormat =true;
break;
}
}
}
if (!foundSupportedFormat)
format =QAudioFormat();
setFormat(format);
}
return foundSupportedFormat;
}
void Engine::stopRecording()
{
if (m_audioInput) {
m_audioInput->stop();
QCoreApplication::instance()->processEvents();
m_audioInput->disconnect();
}
m_audioInputIODevice =0;
#ifdef DUMP_AUDIO
dumpData();
#endif
}
void Engine::stopPlayback()
{
if (m_audioOutput) {
m_audioOutput->stop();
QCoreApplication::instance()->processEvents();
m_audioOutput->disconnect();
setPlayPosition(0);
}
}
void Engine::setState(QAudio::State state)
{
const bool changed = (m_state != state);
m_state = state;
if (changed)
emit stateChanged(m_mode, m_state);
}
void Engine::setState(QAudio::Mode mode, QAudio::State state)
{
const bool changed = (m_mode != mode || m_state != state);
m_mode = mode;
m_state = state;
if (changed)
emit stateChanged(m_mode, m_state);
}
void Engine::setRecordPosition(qint64 position, bool forceEmit)
{
const bool changed = (m_recordPosition != position);
m_recordPosition = position;
if (changed || forceEmit)
emit recordPositionChanged(m_recordPosition);
}
void Engine::setPlayPosition(qint64 position, bool forceEmit)
{
const bool changed = (m_playPosition != position);
m_playPosition = position;
if (changed || forceEmit)
emit playPositionChanged(m_playPosition);
}
void Engine::calculateLevel(qint64 position,qint64 length)
{
#ifdef DISABLE_LEVEL
Q_UNUSED(position)
Q_UNUSED(length)
#else
Q_ASSERT(position + length <= m_bufferPosition + m_dataLength);
qreal peakLevel =0.0;
qreal sum =0.0;
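// Walk the window of 16-bit PCM samples: pcmToReal() (from utils.h) converts
// each sample to a fractional value, the peak level is the largest converted
// value seen, and the RMS level is sqrt(sum(x * x) / numSamples).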
const char *ptr = m_buffer.constData() + position - m_bufferPosition;
const char *const end = ptr + length;
while (ptr < end) {
const qint16 value = *reinterpret_cast<const qint16*>(ptr);
const qreal fracValue = pcmToReal(value);
peakLevel = qMax(peakLevel, fracValue);
sum += fracValue * fracValue;
ptr += 2;
}
const int numSamples = length / 2;
qreal rmsLevel = sqrt(sum / numSamples);
rmsLevel =qMax(qreal(0.0), rmsLevel);
rmsLevel =qMin(qreal(1.0), rmsLevel);
setLevel(rmsLevel, peakLevel, numSamples);
ENGINE_DEBUG <<"Engine::calculateLevel"<<"pos"<< position <<"len"<< length
<<"rms"<< rmsLevel <<"peak"<< peakLevel;
#endif
}
void Engine::calculateSpectrum(qint64 position)
{
#ifdef DISABLE_SPECTRUM
Q_UNUSED(position)
#else
Q_ASSERT(position + m_spectrumBufferLength <= m_bufferPosition + m_dataLength);
Q_ASSERT(0 == m_spectrumBufferLength % 2); // constraint of FFT algorithm

// QThread::currentThread is marked 'for internal use only', but
// we're only using it for debug output here, so it's probably OK :)
ENGINE_DEBUG <<"Engine::calculateSpectrum"<<QThread::currentThread()
<<"count"<< m_count <<"pos"<< position <<"len"<< m_spectrumBufferLength
<<"spectrumAnalyser.isReady"<< m_spectrumAnalyser.isReady();
if(m_spectrumAnalyser.isReady()) {
m_spectrumBuffer =QByteArray::fromRawData(m_buffer.constData() + position - m_bufferPosition,
m_spectrumBufferLength);
m_spectrumPosition = position;
m_spectrumAnalyser.calculate(m_spectrumBuffer, m_format);
}
#endif
}
void Engine::setFormat(const QAudioFormat &format)
{
const bool changed = (format != m_format);
m_format = format;
m_levelBufferLength = audioLength(m_format, LevelWindowUs);
m_spectrumBufferLength = SpectrumLengthSamples *
(m_format.sampleSize() / 8) * m_format.channels();
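// For example, assuming SpectrumLengthSamples is 4096 (it is defined elsewhere
// in the example), 16-bit mono audio gives 4096 * 2 * 1 = 8192 bytes.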
if (changed)
emit formatChanged(m_format);
}
void Engine::setLevel(qreal rmsLevel, qreal peakLevel, int numSamples)
{
m_rmsLevel = rmsLevel;
m_peakLevel = peakLevel;
emit levelChanged(m_rmsLevel, m_peakLevel, numSamples);
}
#ifdef DUMP_DATA
void Engine::createOutputDir()
{
m_outputDir.setPath("output");
// Ensure output directory exists and is empty
if (m_outputDir.exists()) {
const QStringList files = m_outputDir.entryList(QDir::Files);
QString file;
foreach (file, files)
m_outputDir.remove(file);
} else {
QDir::current().mkdir("output");
}
}
#endif // DUMP_DATA

#ifdef DUMP_AUDIO
void Engine::dumpData()
{
const QString txtFileName = m_outputDir.filePath("data.txt");
QFile txtFile(txtFileName);
txtFile.open(QFile::WriteOnly |QFile::Text);
QTextStream stream(&txtFile);
const qint16 *ptr = reinterpret_cast<const qint16*>(m_buffer.constData());
const int numSamples = m_dataLength / (2 * m_format.channels());
for (int i = 0; i < numSamples; ++i) {
stream << i <<"\t"<<*ptr <<"\n";
ptr += m_format.channels();
}
const QString pcmFileName = m_outputDir.filePath("data.pcm");
QFile pcmFile(pcmFileName);
pcmFile.open(QFile::WriteOnly);
pcmFile.write(m_buffer.constData(), m_dataLength);
}
#endif // DUMP_AUDIO