refactor: organize a little more the code in launcher/net/
This also reduces some code duplication by using some Task logic in NetAction.
parent 649b8ac7c6
commit 8c8eabf7ac
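The core of the change visible in the diff below is that NetAction now inherits from Task, so the old launcher/net-specific JobStatus values are replaced by the shared Task::State values throughout. The following sketch is not part of the commit; it only summarizes the one-for-one substitutions that appear in the hunks below, using local stand-in enums whose values mirror the names in the diff (the helper function itself is hypothetical):

// Stand-in enums mirroring the names used in the diff; this helper exists only
// to document the state mapping applied by this commit.
enum class OldJobStatus { NotStarted, InProgress, Finished, Failed, Aborted, FailedProceed };
enum class NewTaskState { Inactive, Running, Succeeded, Failed, AbortedByUser };

constexpr NewTaskState toTaskState(OldJobStatus s)
{
    switch (s) {
        case OldJobStatus::NotStarted:    return NewTaskState::Inactive;      // Job_NotStarted   -> State::Inactive
        case OldJobStatus::InProgress:    return NewTaskState::Running;       // Job_InProgress   -> State::Running
        case OldJobStatus::Finished:      return NewTaskState::Succeeded;     // Job_Finished     -> State::Succeeded
        case OldJobStatus::Failed:        return NewTaskState::Failed;        // Job_Failed       -> State::Failed
        case OldJobStatus::Aborted:       return NewTaskState::AbortedByUser; // Job_Aborted      -> State::AbortedByUser
        // Job_Failed_Proceed has no direct equivalent: Download::downloadError now
        // records State::Succeeded when usable local data is present.
        case OldJobStatus::FailedProceed: return NewTaskState::Succeeded;
    }
    return NewTaskState::Failed;
}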
@ -14,27 +14,25 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "InstanceImportTask.h"
|
#include "InstanceImportTask.h"
|
||||||
|
#include <QtConcurrentRun>
|
||||||
|
#include "Application.h"
|
||||||
#include "BaseInstance.h"
|
#include "BaseInstance.h"
|
||||||
#include "FileSystem.h"
|
#include "FileSystem.h"
|
||||||
#include "Application.h"
|
|
||||||
#include "MMCZip.h"
|
#include "MMCZip.h"
|
||||||
#include "NullInstance.h"
|
#include "NullInstance.h"
|
||||||
#include "settings/INISettingsObject.h"
|
#include "icons/IconList.h"
|
||||||
#include "icons/IconUtils.h"
|
#include "icons/IconUtils.h"
|
||||||
#include <QtConcurrentRun>
|
#include "settings/INISettingsObject.h"
|
||||||
|
|
||||||
// FIXME: this does not belong here, it's Minecraft/Flame specific
|
// FIXME: this does not belong here, it's Minecraft/Flame specific
|
||||||
|
#include <quazip/quazipdir.h>
|
||||||
|
#include "Json.h"
|
||||||
#include "minecraft/MinecraftInstance.h"
|
#include "minecraft/MinecraftInstance.h"
|
||||||
#include "minecraft/PackProfile.h"
|
#include "minecraft/PackProfile.h"
|
||||||
#include "modplatform/flame/FileResolvingTask.h"
|
#include "modplatform/flame/FileResolvingTask.h"
|
||||||
#include "modplatform/flame/PackManifest.h"
|
#include "modplatform/flame/PackManifest.h"
|
||||||
#include "Json.h"
|
|
||||||
#include <quazip/quazipdir.h>
|
|
||||||
#include "modplatform/technic/TechnicPackProcessor.h"
|
#include "modplatform/technic/TechnicPackProcessor.h"
|
||||||
|
|
||||||
#include "icons/IconList.h"
|
|
||||||
#include "Application.h"
|
|
||||||
|
|
||||||
InstanceImportTask::InstanceImportTask(const QUrl sourceUrl)
|
InstanceImportTask::InstanceImportTask(const QUrl sourceUrl)
|
||||||
{
|
{
|
||||||
m_sourceUrl = sourceUrl;
|
m_sourceUrl = sourceUrl;
|
||||||
|
@ -297,7 +297,7 @@ NetAction::Ptr AssetObject::getDownloadAction()
|
|||||||
auto rawHash = QByteArray::fromHex(hash.toLatin1());
|
auto rawHash = QByteArray::fromHex(hash.toLatin1());
|
||||||
objectDL->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
|
objectDL->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
|
||||||
}
|
}
|
||||||
objectDL->m_total_progress = size;
|
objectDL->setProgress(objectDL->getProgress(), size);
|
||||||
return objectDL;
|
return objectDL;
|
||||||
}
|
}
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
@ -3,60 +3,59 @@
|
|||||||
#include "Sink.h"
|
#include "Sink.h"
|
||||||
|
|
||||||
namespace Net {
|
namespace Net {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Sink object for downloads that uses an external QByteArray it doesn't own as a target.
|
* Sink object for downloads that uses an external QByteArray it doesn't own as a target.
|
||||||
*/
|
*/
|
||||||
class ByteArraySink : public Sink
|
class ByteArraySink : public Sink {
|
||||||
{
|
public:
|
||||||
public:
|
ByteArraySink(QByteArray* output) : m_output(output){};
|
||||||
ByteArraySink(QByteArray *output)
|
|
||||||
:m_output(output)
|
|
||||||
{
|
|
||||||
// nil
|
|
||||||
};
|
|
||||||
|
|
||||||
virtual ~ByteArraySink()
|
virtual ~ByteArraySink() = default;
|
||||||
{
|
|
||||||
// nil
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
JobStatus init(QNetworkRequest & request) override
|
auto init(QNetworkRequest& request) -> Task::State override
|
||||||
{
|
{
|
||||||
|
if(!m_output)
|
||||||
|
return Task::State::Failed;
|
||||||
|
|
||||||
m_output->clear();
|
m_output->clear();
|
||||||
if(initAllValidators(request))
|
if (initAllValidators(request))
|
||||||
return Job_InProgress;
|
return Task::State::Running;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
};
|
};
|
||||||
|
|
||||||
JobStatus write(QByteArray & data) override
|
auto write(QByteArray& data) -> Task::State override
|
||||||
{
|
{
|
||||||
|
if(!m_output)
|
||||||
|
return Task::State::Failed;
|
||||||
|
|
||||||
m_output->append(data);
|
m_output->append(data);
|
||||||
if(writeAllValidators(data))
|
if (writeAllValidators(data))
|
||||||
return Job_InProgress;
|
return Task::State::Running;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus abort() override
|
auto abort() -> Task::State override
|
||||||
{
|
{
|
||||||
|
if(!m_output)
|
||||||
|
return Task::State::Failed;
|
||||||
|
|
||||||
m_output->clear();
|
m_output->clear();
|
||||||
failAllValidators();
|
failAllValidators();
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus finalize(QNetworkReply &reply) override
|
auto finalize(QNetworkReply& reply) -> Task::State override
|
||||||
{
|
{
|
||||||
if(finalizeAllValidators(reply))
|
if (finalizeAllValidators(reply))
|
||||||
return Job_Finished;
|
return Task::State::Succeeded;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool hasLocalData() override
|
auto hasLocalData() -> bool override { return false; }
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
QByteArray * m_output;
|
QByteArray* m_output;
|
||||||
};
|
};
|
||||||
}
|
} // namespace Net
|
||||||
|
@ -30,7 +30,7 @@ namespace Net {
|
|||||||
|
|
||||||
Download::Download() : NetAction()
|
Download::Download() : NetAction()
|
||||||
{
|
{
|
||||||
m_status = Job_NotStarted;
|
m_state = State::Inactive;
|
||||||
}
|
}
|
||||||
|
|
||||||
Download::Ptr Download::makeCached(QUrl url, MetaEntryPtr entry, Options options)
|
Download::Ptr Download::makeCached(QUrl url, MetaEntryPtr entry, Options options)
|
||||||
@ -68,29 +68,29 @@ void Download::addValidator(Validator* v)
|
|||||||
m_sink->addValidator(v);
|
m_sink->addValidator(v);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Download::startImpl()
|
void Download::executeTask()
|
||||||
{
|
{
|
||||||
if (m_status == Job_Aborted) {
|
if (getState() == Task::State::AbortedByUser) {
|
||||||
qWarning() << "Attempt to start an aborted Download:" << m_url.toString();
|
qWarning() << "Attempt to start an aborted Download:" << m_url.toString();
|
||||||
emit aborted(m_index_within_job);
|
emit aborted(m_index_within_job);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
QNetworkRequest request(m_url);
|
QNetworkRequest request(m_url);
|
||||||
m_status = m_sink->init(request);
|
m_state = m_sink->init(request);
|
||||||
switch (m_status) {
|
switch (m_state) {
|
||||||
case Job_Finished:
|
case State::Succeeded:
|
||||||
emit succeeded(m_index_within_job);
|
emit succeeded(m_index_within_job);
|
||||||
qDebug() << "Download cache hit " << m_url.toString();
|
qDebug() << "Download cache hit " << m_url.toString();
|
||||||
return;
|
return;
|
||||||
case Job_InProgress:
|
case State::Running:
|
||||||
qDebug() << "Downloading " << m_url.toString();
|
qDebug() << "Downloading " << m_url.toString();
|
||||||
break;
|
break;
|
||||||
case Job_Failed_Proceed: // this is meaningless in this context. We do need a sink.
|
case State::Inactive:
|
||||||
case Job_NotStarted:
|
case State::Failed:
|
||||||
case Job_Failed:
|
|
||||||
emit failed(m_index_within_job);
|
emit failed(m_index_within_job);
|
||||||
return;
|
return;
|
||||||
case Job_Aborted:
|
case State::AbortedByUser:
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -111,8 +111,7 @@ void Download::startImpl()
|
|||||||
|
|
||||||
void Download::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
|
void Download::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
|
||||||
{
|
{
|
||||||
m_total_progress = bytesTotal;
|
setProgress(bytesReceived, bytesTotal);
|
||||||
m_progress = bytesReceived;
|
|
||||||
emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
|
emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -120,17 +119,17 @@ void Download::downloadError(QNetworkReply::NetworkError error)
|
|||||||
{
|
{
|
||||||
if (error == QNetworkReply::OperationCanceledError) {
|
if (error == QNetworkReply::OperationCanceledError) {
|
||||||
qCritical() << "Aborted " << m_url.toString();
|
qCritical() << "Aborted " << m_url.toString();
|
||||||
m_status = Job_Aborted;
|
m_state = State::AbortedByUser;
|
||||||
} else {
|
} else {
|
||||||
if (m_options & Option::AcceptLocalFiles) {
|
if (m_options & Option::AcceptLocalFiles) {
|
||||||
if (m_sink->hasLocalData()) {
|
if (m_sink->hasLocalData()) {
|
||||||
m_status = Job_Failed_Proceed;
|
m_state = State::Succeeded;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// error happened during download.
|
// error happened during download.
|
||||||
qCritical() << "Failed " << m_url.toString() << " with reason " << error;
|
qCritical() << "Failed " << m_url.toString() << " with reason " << error;
|
||||||
m_status = Job_Failed;
|
m_state = State::Failed;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -194,7 +193,8 @@ bool Download::handleRedirect()
|
|||||||
|
|
||||||
m_url = QUrl(redirect.toString());
|
m_url = QUrl(redirect.toString());
|
||||||
qDebug() << "Following redirect to " << m_url.toString();
|
qDebug() << "Following redirect to " << m_url.toString();
|
||||||
start(m_network);
|
startAction(m_network);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -207,19 +207,20 @@ void Download::downloadFinished()
|
|||||||
}
|
}
|
||||||
|
|
||||||
// if the download failed before this point ...
|
// if the download failed before this point ...
|
||||||
if (m_status == Job_Failed_Proceed) {
|
if (m_state == State::Succeeded) // pretend to succeed so we continue processing :)
|
||||||
|
{
|
||||||
qDebug() << "Download failed but we are allowed to proceed:" << m_url.toString();
|
qDebug() << "Download failed but we are allowed to proceed:" << m_url.toString();
|
||||||
m_sink->abort();
|
m_sink->abort();
|
||||||
m_reply.reset();
|
m_reply.reset();
|
||||||
emit succeeded(m_index_within_job);
|
emit succeeded(m_index_within_job);
|
||||||
return;
|
return;
|
||||||
} else if (m_status == Job_Failed) {
|
} else if (m_state == State::Failed) {
|
||||||
qDebug() << "Download failed in previous step:" << m_url.toString();
|
qDebug() << "Download failed in previous step:" << m_url.toString();
|
||||||
m_sink->abort();
|
m_sink->abort();
|
||||||
m_reply.reset();
|
m_reply.reset();
|
||||||
emit failed(m_index_within_job);
|
emit failed(m_index_within_job);
|
||||||
return;
|
return;
|
||||||
} else if (m_status == Job_Aborted) {
|
} else if (m_state == State::AbortedByUser) {
|
||||||
qDebug() << "Download aborted in previous step:" << m_url.toString();
|
qDebug() << "Download aborted in previous step:" << m_url.toString();
|
||||||
m_sink->abort();
|
m_sink->abort();
|
||||||
m_reply.reset();
|
m_reply.reset();
|
||||||
@ -231,12 +232,12 @@ void Download::downloadFinished()
|
|||||||
auto data = m_reply->readAll();
|
auto data = m_reply->readAll();
|
||||||
if (data.size()) {
|
if (data.size()) {
|
||||||
qDebug() << "Writing extra" << data.size() << "bytes to" << m_target_path;
|
qDebug() << "Writing extra" << data.size() << "bytes to" << m_target_path;
|
||||||
m_status = m_sink->write(data);
|
m_state = m_sink->write(data);
|
||||||
}
|
}
|
||||||
|
|
||||||
// otherwise, finalize the whole graph
|
// otherwise, finalize the whole graph
|
||||||
m_status = m_sink->finalize(*m_reply.get());
|
m_state = m_sink->finalize(*m_reply.get());
|
||||||
if (m_status != Job_Finished) {
|
if (m_state != State::Succeeded) {
|
||||||
qDebug() << "Download failed to finalize:" << m_url.toString();
|
qDebug() << "Download failed to finalize:" << m_url.toString();
|
||||||
m_sink->abort();
|
m_sink->abort();
|
||||||
m_reply.reset();
|
m_reply.reset();
|
||||||
@ -250,10 +251,10 @@ void Download::downloadFinished()
|
|||||||
|
|
||||||
void Download::downloadReadyRead()
|
void Download::downloadReadyRead()
|
||||||
{
|
{
|
||||||
if (m_status == Job_InProgress) {
|
if (m_state == State::Running) {
|
||||||
auto data = m_reply->readAll();
|
auto data = m_reply->readAll();
|
||||||
m_status = m_sink->write(data);
|
m_state = m_sink->write(data);
|
||||||
if (m_status == Job_Failed) {
|
if (m_state == State::Failed) {
|
||||||
qCritical() << "Failed to process response chunk for " << m_target_path;
|
qCritical() << "Failed to process response chunk for " << m_target_path;
|
||||||
}
|
}
|
||||||
// qDebug() << "Download" << m_url.toString() << "gained" << data.size() << "bytes";
|
// qDebug() << "Download" << m_url.toString() << "gained" << data.size() << "bytes";
|
||||||
@ -269,12 +270,7 @@ bool Net::Download::abort()
|
|||||||
if (m_reply) {
|
if (m_reply) {
|
||||||
m_reply->abort();
|
m_reply->abort();
|
||||||
} else {
|
} else {
|
||||||
m_status = Job_Aborted;
|
m_state = State::AbortedByUser;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Net::Download::canAbort()
|
|
||||||
{
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
@ -27,7 +27,7 @@ class Download : public NetAction
|
|||||||
{
|
{
|
||||||
Q_OBJECT
|
Q_OBJECT
|
||||||
|
|
||||||
public: /* types */
|
public:
|
||||||
typedef shared_qobject_ptr<class Download> Ptr;
|
typedef shared_qobject_ptr<class Download> Ptr;
|
||||||
enum class Option
|
enum class Option
|
||||||
{
|
{
|
||||||
@ -36,7 +36,7 @@ public: /* types */
|
|||||||
};
|
};
|
||||||
Q_DECLARE_FLAGS(Options, Option)
|
Q_DECLARE_FLAGS(Options, Option)
|
||||||
|
|
||||||
protected: /* con/des */
|
protected:
|
||||||
explicit Download();
|
explicit Download();
|
||||||
public:
|
public:
|
||||||
virtual ~Download(){};
|
virtual ~Download(){};
|
||||||
@ -44,16 +44,16 @@ public:
|
|||||||
static Download::Ptr makeByteArray(QUrl url, QByteArray *output, Options options = Option::NoOptions);
|
static Download::Ptr makeByteArray(QUrl url, QByteArray *output, Options options = Option::NoOptions);
|
||||||
static Download::Ptr makeFile(QUrl url, QString path, Options options = Option::NoOptions);
|
static Download::Ptr makeFile(QUrl url, QString path, Options options = Option::NoOptions);
|
||||||
|
|
||||||
public: /* methods */
|
public:
|
||||||
QString getTargetFilepath()
|
QString getTargetFilepath()
|
||||||
{
|
{
|
||||||
return m_target_path;
|
return m_target_path;
|
||||||
}
|
}
|
||||||
void addValidator(Validator * v);
|
void addValidator(Validator * v);
|
||||||
bool abort() override;
|
bool abort() override;
|
||||||
bool canAbort() override;
|
bool canAbort() const override { return true; };
|
||||||
|
|
||||||
private: /* methods */
|
private:
|
||||||
bool handleRedirect();
|
bool handleRedirect();
|
||||||
|
|
||||||
protected slots:
|
protected slots:
|
||||||
@ -64,9 +64,9 @@ protected slots:
|
|||||||
void downloadReadyRead() override;
|
void downloadReadyRead() override;
|
||||||
|
|
||||||
public slots:
|
public slots:
|
||||||
void startImpl() override;
|
void executeTask() override;
|
||||||
|
|
||||||
private: /* data */
|
private:
|
||||||
// FIXME: remove this, it has no business being here.
|
// FIXME: remove this, it has no business being here.
|
||||||
QString m_target_path;
|
QString m_target_path;
|
||||||
std::unique_ptr<Sink> m_sink;
|
std::unique_ptr<Sink> m_sink;
|
||||||
|
@ -1,25 +1,15 @@
|
|||||||
#include "FileSink.h"
|
#include "FileSink.h"
|
||||||
|
|
||||||
#include <QFile>
|
#include <QFile>
|
||||||
#include <QFileInfo>
|
|
||||||
#include "FileSystem.h"
|
#include "FileSystem.h"
|
||||||
|
|
||||||
namespace Net {
|
namespace Net {
|
||||||
|
|
||||||
FileSink::FileSink(QString filename)
|
Task::State FileSink::init(QNetworkRequest& request)
|
||||||
:m_filename(filename)
|
|
||||||
{
|
|
||||||
// nil
|
|
||||||
}
|
|
||||||
|
|
||||||
FileSink::~FileSink()
|
|
||||||
{
|
|
||||||
// nil
|
|
||||||
}
|
|
||||||
|
|
||||||
JobStatus FileSink::init(QNetworkRequest& request)
|
|
||||||
{
|
{
|
||||||
auto result = initCache(request);
|
auto result = initCache(request);
|
||||||
if(result != Job_InProgress)
|
if(result != Task::State::Running)
|
||||||
{
|
{
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@ -27,27 +17,27 @@ JobStatus FileSink::init(QNetworkRequest& request)
|
|||||||
if (!FS::ensureFilePathExists(m_filename))
|
if (!FS::ensureFilePathExists(m_filename))
|
||||||
{
|
{
|
||||||
qCritical() << "Could not create folder for " + m_filename;
|
qCritical() << "Could not create folder for " + m_filename;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
wroteAnyData = false;
|
wroteAnyData = false;
|
||||||
m_output_file.reset(new QSaveFile(m_filename));
|
m_output_file.reset(new QSaveFile(m_filename));
|
||||||
if (!m_output_file->open(QIODevice::WriteOnly))
|
if (!m_output_file->open(QIODevice::WriteOnly))
|
||||||
{
|
{
|
||||||
qCritical() << "Could not open " + m_filename + " for writing";
|
qCritical() << "Could not open " + m_filename + " for writing";
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
if(initAllValidators(request))
|
if(initAllValidators(request))
|
||||||
return Job_InProgress;
|
return Task::State::Running;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus FileSink::initCache(QNetworkRequest &)
|
Task::State FileSink::initCache(QNetworkRequest &)
|
||||||
{
|
{
|
||||||
return Job_InProgress;
|
return Task::State::Running;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus FileSink::write(QByteArray& data)
|
Task::State FileSink::write(QByteArray& data)
|
||||||
{
|
{
|
||||||
if (!writeAllValidators(data) || m_output_file->write(data) != data.size())
|
if (!writeAllValidators(data) || m_output_file->write(data) != data.size())
|
||||||
{
|
{
|
||||||
@ -55,20 +45,20 @@ JobStatus FileSink::write(QByteArray& data)
|
|||||||
m_output_file->cancelWriting();
|
m_output_file->cancelWriting();
|
||||||
m_output_file.reset();
|
m_output_file.reset();
|
||||||
wroteAnyData = false;
|
wroteAnyData = false;
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
wroteAnyData = true;
|
wroteAnyData = true;
|
||||||
return Job_InProgress;
|
return Task::State::Running;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus FileSink::abort()
|
Task::State FileSink::abort()
|
||||||
{
|
{
|
||||||
m_output_file->cancelWriting();
|
m_output_file->cancelWriting();
|
||||||
failAllValidators();
|
failAllValidators();
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus FileSink::finalize(QNetworkReply& reply)
|
Task::State FileSink::finalize(QNetworkReply& reply)
|
||||||
{
|
{
|
||||||
bool gotFile = false;
|
bool gotFile = false;
|
||||||
QVariant statusCodeV = reply.attribute(QNetworkRequest::HttpStatusCodeAttribute);
|
QVariant statusCodeV = reply.attribute(QNetworkRequest::HttpStatusCodeAttribute);
|
||||||
@ -86,13 +76,13 @@ JobStatus FileSink::finalize(QNetworkReply& reply)
|
|||||||
// ask validators for data consistency
|
// ask validators for data consistency
|
||||||
// we only do this for actual downloads, not 'your data is still the same' cache hits
|
// we only do this for actual downloads, not 'your data is still the same' cache hits
|
||||||
if(!finalizeAllValidators(reply))
|
if(!finalizeAllValidators(reply))
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
// nothing went wrong...
|
// nothing went wrong...
|
||||||
if (!m_output_file->commit())
|
if (!m_output_file->commit())
|
||||||
{
|
{
|
||||||
qCritical() << "Failed to commit changes to " << m_filename;
|
qCritical() << "Failed to commit changes to " << m_filename;
|
||||||
m_output_file->cancelWriting();
|
m_output_file->cancelWriting();
|
||||||
return Job_Failed;
|
return Task::State::Failed;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// then get rid of the save file
|
// then get rid of the save file
|
||||||
@ -101,9 +91,9 @@ JobStatus FileSink::finalize(QNetworkReply& reply)
|
|||||||
return finalizeCache(reply);
|
return finalizeCache(reply);
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus FileSink::finalizeCache(QNetworkReply &)
|
Task::State FileSink::finalizeCache(QNetworkReply &)
|
||||||
{
|
{
|
||||||
return Job_Finished;
|
return Task::State::Succeeded;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool FileSink::hasLocalData()
|
bool FileSink::hasLocalData()
|
||||||
|
@ -1,28 +1,30 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
#include "Sink.h"
|
|
||||||
#include <QSaveFile>
|
#include <QSaveFile>
|
||||||
|
|
||||||
|
#include "Sink.h"
|
||||||
|
|
||||||
namespace Net {
|
namespace Net {
|
||||||
class FileSink : public Sink
|
class FileSink : public Sink {
|
||||||
{
|
public:
|
||||||
public: /* con/des */
|
FileSink(QString filename) : m_filename(filename){};
|
||||||
FileSink(QString filename);
|
virtual ~FileSink() = default;
|
||||||
virtual ~FileSink();
|
|
||||||
|
|
||||||
public: /* methods */
|
public:
|
||||||
JobStatus init(QNetworkRequest & request) override;
|
auto init(QNetworkRequest& request) -> Task::State override;
|
||||||
JobStatus write(QByteArray & data) override;
|
auto write(QByteArray& data) -> Task::State override;
|
||||||
JobStatus abort() override;
|
auto abort() -> Task::State override;
|
||||||
JobStatus finalize(QNetworkReply & reply) override;
|
auto finalize(QNetworkReply& reply) -> Task::State override;
|
||||||
bool hasLocalData() override;
|
|
||||||
|
|
||||||
protected: /* methods */
|
auto hasLocalData() -> bool override;
|
||||||
virtual JobStatus initCache(QNetworkRequest &);
|
|
||||||
virtual JobStatus finalizeCache(QNetworkReply &reply);
|
|
||||||
|
|
||||||
protected: /* data */
|
protected:
|
||||||
|
virtual auto initCache(QNetworkRequest&) -> Task::State;
|
||||||
|
virtual auto finalizeCache(QNetworkReply& reply) -> Task::State;
|
||||||
|
|
||||||
|
protected:
|
||||||
QString m_filename;
|
QString m_filename;
|
||||||
bool wroteAnyData = false;
|
bool wroteAnyData = false;
|
||||||
std::unique_ptr<QSaveFile> m_output_file;
|
std::unique_ptr<QSaveFile> m_output_file;
|
||||||
};
|
};
|
||||||
}
|
} // namespace Net
|
||||||
|
@ -12,17 +12,13 @@ MetaCacheSink::MetaCacheSink(MetaEntryPtr entry, ChecksumValidator * md5sum)
|
|||||||
addValidator(md5sum);
|
addValidator(md5sum);
|
||||||
}
|
}
|
||||||
|
|
||||||
MetaCacheSink::~MetaCacheSink()
|
Task::State MetaCacheSink::initCache(QNetworkRequest& request)
|
||||||
{
|
|
||||||
// nil
|
|
||||||
}
|
|
||||||
|
|
||||||
JobStatus MetaCacheSink::initCache(QNetworkRequest& request)
|
|
||||||
{
|
{
|
||||||
if (!m_entry->isStale())
|
if (!m_entry->isStale())
|
||||||
{
|
{
|
||||||
return Job_Finished;
|
return Task::State::Succeeded;
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if file exists, if it does, use its information for the request
|
// check if file exists, if it does, use its information for the request
|
||||||
QFile current(m_filename);
|
QFile current(m_filename);
|
||||||
if(current.exists() && current.size() != 0)
|
if(current.exists() && current.size() != 0)
|
||||||
@ -36,25 +32,31 @@ JobStatus MetaCacheSink::initCache(QNetworkRequest& request)
|
|||||||
request.setRawHeader(QString("If-None-Match").toLatin1(), m_entry->getETag().toLatin1());
|
request.setRawHeader(QString("If-None-Match").toLatin1(), m_entry->getETag().toLatin1());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return Job_InProgress;
|
|
||||||
|
return Task::State::Running;
|
||||||
}
|
}
|
||||||
|
|
||||||
JobStatus MetaCacheSink::finalizeCache(QNetworkReply & reply)
|
Task::State MetaCacheSink::finalizeCache(QNetworkReply & reply)
|
||||||
{
|
{
|
||||||
QFileInfo output_file_info(m_filename);
|
QFileInfo output_file_info(m_filename);
|
||||||
|
|
||||||
if(wroteAnyData)
|
if(wroteAnyData)
|
||||||
{
|
{
|
||||||
m_entry->setMD5Sum(m_md5Node->hash().toHex().constData());
|
m_entry->setMD5Sum(m_md5Node->hash().toHex().constData());
|
||||||
}
|
}
|
||||||
|
|
||||||
m_entry->setETag(reply.rawHeader("ETag").constData());
|
m_entry->setETag(reply.rawHeader("ETag").constData());
|
||||||
|
|
||||||
if (reply.hasRawHeader("Last-Modified"))
|
if (reply.hasRawHeader("Last-Modified"))
|
||||||
{
|
{
|
||||||
m_entry->setRemoteChangedTimestamp(reply.rawHeader("Last-Modified").constData());
|
m_entry->setRemoteChangedTimestamp(reply.rawHeader("Last-Modified").constData());
|
||||||
}
|
}
|
||||||
|
|
||||||
m_entry->setLocalChangedTimestamp(output_file_info.lastModified().toUTC().toMSecsSinceEpoch());
|
m_entry->setLocalChangedTimestamp(output_file_info.lastModified().toUTC().toMSecsSinceEpoch());
|
||||||
m_entry->setStale(false);
|
m_entry->setStale(false);
|
||||||
APPLICATION->metacache()->updateEntry(m_entry);
|
APPLICATION->metacache()->updateEntry(m_entry);
|
||||||
return Job_Finished;
|
|
||||||
|
return Task::State::Succeeded;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool MetaCacheSink::hasLocalData()
|
bool MetaCacheSink::hasLocalData()
|
||||||
|
@ -1,22 +1,23 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
#include "FileSink.h"
|
|
||||||
#include "ChecksumValidator.h"
|
#include "ChecksumValidator.h"
|
||||||
|
#include "FileSink.h"
|
||||||
#include "net/HttpMetaCache.h"
|
#include "net/HttpMetaCache.h"
|
||||||
|
|
||||||
namespace Net {
|
namespace Net {
|
||||||
class MetaCacheSink : public FileSink
|
class MetaCacheSink : public FileSink {
|
||||||
{
|
public:
|
||||||
public: /* con/des */
|
MetaCacheSink(MetaEntryPtr entry, ChecksumValidator* md5sum);
|
||||||
MetaCacheSink(MetaEntryPtr entry, ChecksumValidator * md5sum);
|
virtual ~MetaCacheSink() = default;
|
||||||
virtual ~MetaCacheSink();
|
|
||||||
bool hasLocalData() override;
|
|
||||||
|
|
||||||
protected: /* methods */
|
auto hasLocalData() -> bool override;
|
||||||
JobStatus initCache(QNetworkRequest & request) override;
|
|
||||||
JobStatus finalizeCache(QNetworkReply & reply) override;
|
|
||||||
|
|
||||||
private: /* data */
|
protected:
|
||||||
|
auto initCache(QNetworkRequest& request) -> Task::State override;
|
||||||
|
auto finalizeCache(QNetworkReply& reply) -> Task::State override;
|
||||||
|
|
||||||
|
private:
|
||||||
MetaEntryPtr m_entry;
|
MetaEntryPtr m_entry;
|
||||||
ChecksumValidator * m_md5Node;
|
ChecksumValidator* m_md5Node;
|
||||||
};
|
};
|
||||||
}
|
} // namespace Net
|
||||||
|
@ -1,108 +1,81 @@
|
|||||||
/* Copyright 2013-2021 MultiMC Contributors
|
// SPDX-License-Identifier: GPL-3.0-only
|
||||||
|
/*
|
||||||
|
* PolyMC - Minecraft Launcher
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* This program is free software: you can redistribute it and/or modify
|
||||||
* you may not use this file except in compliance with the License.
|
* it under the terms of the GNU General Public License as published by
|
||||||
* You may obtain a copy of the License at
|
* the Free Software Foundation, version 3.
|
||||||
*
|
*
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU General Public License for more details.
|
||||||
*
|
*
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
* You should have received a copy of the GNU General Public License
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
* along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
*
|
||||||
* See the License for the specific language governing permissions and
|
* This file incorporates work covered by the following copyright and
|
||||||
* limitations under the License.
|
* permission notice:
|
||||||
|
*
|
||||||
|
* Copyright 2013-2021 MultiMC Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <QObject>
|
|
||||||
#include <QUrl>
|
|
||||||
#include <memory>
|
|
||||||
#include <QNetworkReply>
|
#include <QNetworkReply>
|
||||||
#include <QObjectPtr.h>
|
#include <QUrl>
|
||||||
|
|
||||||
enum JobStatus
|
#include "QObjectPtr.h"
|
||||||
{
|
#include "tasks/Task.h"
|
||||||
Job_NotStarted,
|
|
||||||
Job_InProgress,
|
|
||||||
Job_Finished,
|
|
||||||
Job_Failed,
|
|
||||||
Job_Aborted,
|
|
||||||
/*
|
|
||||||
* FIXME: @NUKE this confuses the task failing with us having a fallback in the form of local data. Clear up the confusion.
|
|
||||||
* Same could be true for aborted task - the presence of pre-existing result is a separate concern
|
|
||||||
*/
|
|
||||||
Job_Failed_Proceed
|
|
||||||
};
|
|
||||||
|
|
||||||
class NetAction : public QObject
|
class NetAction : public Task {
|
||||||
{
|
|
||||||
Q_OBJECT
|
Q_OBJECT
|
||||||
protected:
|
protected:
|
||||||
explicit NetAction() : QObject(nullptr) {};
|
explicit NetAction() : Task(nullptr) {};
|
||||||
|
|
||||||
public:
|
public:
|
||||||
using Ptr = shared_qobject_ptr<NetAction>;
|
using Ptr = shared_qobject_ptr<NetAction>;
|
||||||
|
|
||||||
virtual ~NetAction() {};
|
virtual ~NetAction() = default;
|
||||||
|
|
||||||
bool isRunning() const
|
QUrl url() { return m_url; }
|
||||||
{
|
|
||||||
return m_status == Job_InProgress;
|
|
||||||
}
|
|
||||||
bool isFinished() const
|
|
||||||
{
|
|
||||||
return m_status >= Job_Finished;
|
|
||||||
}
|
|
||||||
bool wasSuccessful() const
|
|
||||||
{
|
|
||||||
return m_status == Job_Finished || m_status == Job_Failed_Proceed;
|
|
||||||
}
|
|
||||||
|
|
||||||
qint64 totalProgress() const
|
signals:
|
||||||
{
|
|
||||||
return m_total_progress;
|
|
||||||
}
|
|
||||||
qint64 currentProgress() const
|
|
||||||
{
|
|
||||||
return m_progress;
|
|
||||||
}
|
|
||||||
virtual bool abort()
|
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
virtual bool canAbort()
|
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
QUrl url()
|
|
||||||
{
|
|
||||||
return m_url;
|
|
||||||
}
|
|
||||||
|
|
||||||
signals:
|
|
||||||
void started(int index);
|
void started(int index);
|
||||||
void netActionProgress(int index, qint64 current, qint64 total);
|
void netActionProgress(int index, qint64 current, qint64 total);
|
||||||
void succeeded(int index);
|
void succeeded(int index);
|
||||||
void failed(int index);
|
void failed(int index);
|
||||||
void aborted(int index);
|
void aborted(int index);
|
||||||
|
|
||||||
protected slots:
|
protected slots:
|
||||||
virtual void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) = 0;
|
virtual void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) = 0;
|
||||||
virtual void downloadError(QNetworkReply::NetworkError error) = 0;
|
virtual void downloadError(QNetworkReply::NetworkError error) = 0;
|
||||||
virtual void downloadFinished() = 0;
|
virtual void downloadFinished() = 0;
|
||||||
virtual void downloadReadyRead() = 0;
|
virtual void downloadReadyRead() = 0;
|
||||||
|
|
||||||
public slots:
|
public slots:
|
||||||
void start(shared_qobject_ptr<QNetworkAccessManager> network) {
|
void startAction(shared_qobject_ptr<QNetworkAccessManager> network)
|
||||||
|
{
|
||||||
m_network = network;
|
m_network = network;
|
||||||
startImpl();
|
executeTask();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
virtual void startImpl() = 0;
|
void executeTask() override {};
|
||||||
|
|
||||||
public:
|
public:
|
||||||
shared_qobject_ptr<QNetworkAccessManager> m_network;
|
shared_qobject_ptr<QNetworkAccessManager> m_network;
|
||||||
|
|
||||||
/// index within the parent job, FIXME: nuke
|
/// index within the parent job, FIXME: nuke
|
||||||
@ -113,10 +86,4 @@ public:
|
|||||||
|
|
||||||
/// source URL
|
/// source URL
|
||||||
QUrl m_url;
|
QUrl m_url;
|
||||||
|
|
||||||
qint64 m_progress = 0;
|
|
||||||
qint64 m_total_progress = 1;
|
|
||||||
|
|
||||||
protected:
|
|
||||||
JobStatus m_status = Job_NotStarted;
|
|
||||||
};
|
};
|
||||||
|
@ -1,79 +1,170 @@
|
|||||||
/* Copyright 2013-2021 MultiMC Contributors
|
// SPDX-License-Identifier: GPL-3.0-only
|
||||||
|
/*
|
||||||
|
* PolyMC - Minecraft Launcher
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* This program is free software: you can redistribute it and/or modify
|
||||||
* you may not use this file except in compliance with the License.
|
* it under the terms of the GNU General Public License as published by
|
||||||
* You may obtain a copy of the License at
|
* the Free Software Foundation, version 3.
|
||||||
*
|
*
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU General Public License for more details.
|
||||||
*
|
*
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
* You should have received a copy of the GNU General Public License
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
* along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
*
|
||||||
* See the License for the specific language governing permissions and
|
* This file incorporates work covered by the following copyright and
|
||||||
* limitations under the License.
|
* permission notice:
|
||||||
|
*
|
||||||
|
* Copyright 2013-2021 MultiMC Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "NetJob.h"
|
#include "NetJob.h"
|
||||||
#include "Download.h"
|
#include "Download.h"
|
||||||
|
|
||||||
#include <QDebug>
|
auto NetJob::addNetAction(NetAction::Ptr action) -> bool
|
||||||
|
{
|
||||||
|
action->m_index_within_job = m_downloads.size();
|
||||||
|
m_downloads.append(action);
|
||||||
|
part_info pi;
|
||||||
|
m_parts_progress.append(pi);
|
||||||
|
|
||||||
|
partProgress(m_parts_progress.count() - 1, action->getProgress(), action->getTotalProgress());
|
||||||
|
|
||||||
|
if (action->isRunning()) {
|
||||||
|
connect(action.get(), &NetAction::succeeded, this, &NetJob::partSucceeded);
|
||||||
|
connect(action.get(), &NetAction::failed, this, &NetJob::partFailed);
|
||||||
|
connect(action.get(), &NetAction::netActionProgress, this, &NetJob::partProgress);
|
||||||
|
} else {
|
||||||
|
m_todo.append(m_parts_progress.size() - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto NetJob::canAbort() const -> bool
|
||||||
|
{
|
||||||
|
bool canFullyAbort = true;
|
||||||
|
|
||||||
|
// can abort the downloads on the queue?
|
||||||
|
for (auto index : m_todo) {
|
||||||
|
auto part = m_downloads[index];
|
||||||
|
canFullyAbort &= part->canAbort();
|
||||||
|
}
|
||||||
|
// can abort the active downloads?
|
||||||
|
for (auto index : m_doing) {
|
||||||
|
auto part = m_downloads[index];
|
||||||
|
canFullyAbort &= part->canAbort();
|
||||||
|
}
|
||||||
|
|
||||||
|
return canFullyAbort;
|
||||||
|
}
|
||||||
|
|
||||||
|
void NetJob::executeTask()
|
||||||
|
{
|
||||||
|
// hack that delays early failures so they can be caught easier
|
||||||
|
QMetaObject::invokeMethod(this, "startMoreParts", Qt::QueuedConnection);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto NetJob::getFailedFiles() -> QStringList
|
||||||
|
{
|
||||||
|
QStringList failed;
|
||||||
|
for (auto index : m_failed) {
|
||||||
|
failed.push_back(m_downloads[index]->url().toString());
|
||||||
|
}
|
||||||
|
failed.sort();
|
||||||
|
return failed;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto NetJob::abort() -> bool
|
||||||
|
{
|
||||||
|
bool fullyAborted = true;
|
||||||
|
|
||||||
|
// fail all downloads on the queue
|
||||||
|
m_failed.unite(m_todo.toSet());
|
||||||
|
m_todo.clear();
|
||||||
|
|
||||||
|
// abort active downloads
|
||||||
|
auto toKill = m_doing.toList();
|
||||||
|
for (auto index : toKill) {
|
||||||
|
auto part = m_downloads[index];
|
||||||
|
fullyAborted &= part->abort();
|
||||||
|
}
|
||||||
|
|
||||||
|
return fullyAborted;
|
||||||
|
}
|
||||||
|
|
||||||
void NetJob::partSucceeded(int index)
|
void NetJob::partSucceeded(int index)
|
||||||
{
|
{
|
||||||
// do progress. all slots are 1 in size at least
|
// do progress. all slots are 1 in size at least
|
||||||
auto &slot = parts_progress[index];
|
auto& slot = m_parts_progress[index];
|
||||||
partProgress(index, slot.total_progress, slot.total_progress);
|
partProgress(index, slot.total_progress, slot.total_progress);
|
||||||
|
|
||||||
m_doing.remove(index);
|
m_doing.remove(index);
|
||||||
m_done.insert(index);
|
m_done.insert(index);
|
||||||
downloads[index].get()->disconnect(this);
|
m_downloads[index].get()->disconnect(this);
|
||||||
|
|
||||||
startMoreParts();
|
startMoreParts();
|
||||||
}
|
}
|
||||||
|
|
||||||
void NetJob::partFailed(int index)
|
void NetJob::partFailed(int index)
|
||||||
{
|
{
|
||||||
m_doing.remove(index);
|
m_doing.remove(index);
|
||||||
auto &slot = parts_progress[index];
|
|
||||||
if (slot.failures == 3)
|
auto& slot = m_parts_progress[index];
|
||||||
{
|
// Can try 3 times before failing by definitive
|
||||||
|
if (slot.failures == 3) {
|
||||||
m_failed.insert(index);
|
m_failed.insert(index);
|
||||||
}
|
} else {
|
||||||
else
|
|
||||||
{
|
|
||||||
slot.failures++;
|
slot.failures++;
|
||||||
m_todo.enqueue(index);
|
m_todo.enqueue(index);
|
||||||
}
|
}
|
||||||
downloads[index].get()->disconnect(this);
|
|
||||||
|
m_downloads[index].get()->disconnect(this);
|
||||||
|
|
||||||
startMoreParts();
|
startMoreParts();
|
||||||
}
|
}
|
||||||
|
|
||||||
void NetJob::partAborted(int index)
|
void NetJob::partAborted(int index)
|
||||||
{
|
{
|
||||||
m_aborted = true;
|
m_aborted = true;
|
||||||
|
|
||||||
m_doing.remove(index);
|
m_doing.remove(index);
|
||||||
m_failed.insert(index);
|
m_failed.insert(index);
|
||||||
downloads[index].get()->disconnect(this);
|
m_downloads[index].get()->disconnect(this);
|
||||||
|
|
||||||
startMoreParts();
|
startMoreParts();
|
||||||
}
|
}
|
||||||
|
|
||||||
void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
|
void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
|
||||||
{
|
{
|
||||||
auto &slot = parts_progress[index];
|
auto& slot = m_parts_progress[index];
|
||||||
slot.current_progress = bytesReceived;
|
slot.current_progress = bytesReceived;
|
||||||
slot.total_progress = bytesTotal;
|
slot.total_progress = bytesTotal;
|
||||||
|
|
||||||
int done = m_done.size();
|
int done = m_done.size();
|
||||||
int doing = m_doing.size();
|
int doing = m_doing.size();
|
||||||
int all = parts_progress.size();
|
int all = m_parts_progress.size();
|
||||||
|
|
||||||
qint64 bytesAll = 0;
|
qint64 bytesAll = 0;
|
||||||
qint64 bytesTotalAll = 0;
|
qint64 bytesTotalAll = 0;
|
||||||
for(auto & partIdx: m_doing)
|
for (auto& partIdx : m_doing) {
|
||||||
{
|
auto part = m_parts_progress[partIdx];
|
||||||
auto part = parts_progress[partIdx];
|
|
||||||
// do not count parts with unknown/nonsensical total size
|
// do not count parts with unknown/nonsensical total size
|
||||||
if(part.total_progress <= 0)
|
if (part.total_progress <= 0) {
|
||||||
{
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
bytesAll += part.current_progress;
|
bytesAll += part.current_progress;
|
||||||
@ -85,134 +176,53 @@ void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
|
|||||||
auto current_total = all * 1000;
|
auto current_total = all * 1000;
|
||||||
// HACK: make sure it never jumps backwards.
|
// HACK: make sure it never jumps backwards.
|
||||||
// FAIL: This breaks if the size is not known (or is it something else?) and jumps to 1000, so if it is 1000 reset it to inprogress
|
// FAIL: This breaks if the size is not known (or is it something else?) and jumps to 1000, so if it is 1000 reset it to inprogress
|
||||||
if(m_current_progress == 1000) {
|
if (m_current_progress == 1000) {
|
||||||
m_current_progress = inprogress;
|
m_current_progress = inprogress;
|
||||||
}
|
}
|
||||||
if(m_current_progress > current)
|
if (m_current_progress > current) {
|
||||||
{
|
|
||||||
current = m_current_progress;
|
current = m_current_progress;
|
||||||
}
|
}
|
||||||
m_current_progress = current;
|
m_current_progress = current;
|
||||||
setProgress(current, current_total);
|
setProgress(current, current_total);
|
||||||
}
|
}
|
||||||
|
|
||||||
void NetJob::executeTask()
|
|
||||||
{
|
|
||||||
// hack that delays early failures so they can be caught easier
|
|
||||||
QMetaObject::invokeMethod(this, "startMoreParts", Qt::QueuedConnection);
|
|
||||||
}
|
|
||||||
|
|
||||||
void NetJob::startMoreParts()
|
void NetJob::startMoreParts()
|
||||||
{
|
{
|
||||||
if(!isRunning())
|
if (!isRunning()) {
|
||||||
{
|
// this actually makes sense. You can put running m_downloads into a NetJob and then not start it until much later.
|
||||||
// this actually makes sense. You can put running downloads into a NetJob and then not start it until much later.
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// OK. We are actively processing tasks, proceed.
|
// OK. We are actively processing tasks, proceed.
|
||||||
// Check for final conditions if there's nothing in the queue.
|
// Check for final conditions if there's nothing in the queue.
|
||||||
if(!m_todo.size())
|
if (!m_todo.size()) {
|
||||||
{
|
if (!m_doing.size()) {
|
||||||
if(!m_doing.size())
|
if (!m_failed.size()) {
|
||||||
{
|
|
||||||
if(!m_failed.size())
|
|
||||||
{
|
|
||||||
emitSucceeded();
|
emitSucceeded();
|
||||||
}
|
} else if (m_aborted) {
|
||||||
else if(m_aborted)
|
|
||||||
{
|
|
||||||
emitAborted();
|
emitAborted();
|
||||||
}
|
} else {
|
||||||
else
|
|
||||||
{
|
|
||||||
emitFailed(tr("Job '%1' failed to process:\n%2").arg(objectName()).arg(getFailedFiles().join("\n")));
|
emitFailed(tr("Job '%1' failed to process:\n%2").arg(objectName()).arg(getFailedFiles().join("\n")));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// There's work to do, try to start more parts.
|
|
||||||
while (m_doing.size() < 6)
|
// There's work to do, try to start more parts, to a maximum of 6 concurrent ones.
|
||||||
{
|
while (m_doing.size() < 6) {
|
||||||
if(!m_todo.size())
|
if (m_todo.size() == 0)
|
||||||
return;
|
return;
|
||||||
int doThis = m_todo.dequeue();
|
int doThis = m_todo.dequeue();
|
||||||
m_doing.insert(doThis);
|
m_doing.insert(doThis);
|
||||||
auto part = downloads[doThis];
|
|
||||||
|
auto part = m_downloads[doThis];
|
||||||
|
|
||||||
// connect signals :D
|
// connect signals :D
|
||||||
connect(part.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
|
connect(part.get(), &NetAction::succeeded, this, &NetJob::partSucceeded);
|
||||||
connect(part.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
|
connect(part.get(), &NetAction::failed, this, &NetJob::partFailed);
|
||||||
connect(part.get(), SIGNAL(aborted(int)), SLOT(partAborted(int)));
|
connect(part.get(), &NetAction::aborted, this, &NetJob::partAborted);
|
||||||
connect(part.get(), SIGNAL(netActionProgress(int, qint64, qint64)),
|
connect(part.get(), &NetAction::netActionProgress, this, &NetJob::partProgress);
|
||||||
SLOT(partProgress(int, qint64, qint64)));
|
|
||||||
part->start(m_network);
|
part->startAction(m_network);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
QStringList NetJob::getFailedFiles()
|
|
||||||
{
|
|
||||||
QStringList failed;
|
|
||||||
for (auto index: m_failed)
|
|
||||||
{
|
|
||||||
failed.push_back(downloads[index]->url().toString());
|
|
||||||
}
|
|
||||||
failed.sort();
|
|
||||||
return failed;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NetJob::canAbort() const
|
|
||||||
{
|
|
||||||
bool canFullyAbort = true;
|
|
||||||
// can abort the waiting?
|
|
||||||
for(auto index: m_todo)
|
|
||||||
{
|
|
||||||
auto part = downloads[index];
|
|
||||||
canFullyAbort &= part->canAbort();
|
|
||||||
}
|
|
||||||
// can abort the active?
|
|
||||||
for(auto index: m_doing)
|
|
||||||
{
|
|
||||||
auto part = downloads[index];
|
|
||||||
canFullyAbort &= part->canAbort();
|
|
||||||
}
|
|
||||||
return canFullyAbort;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NetJob::abort()
|
|
||||||
{
|
|
||||||
bool fullyAborted = true;
|
|
||||||
// fail all waiting
|
|
||||||
m_failed.unite(m_todo.toSet());
|
|
||||||
m_todo.clear();
|
|
||||||
// abort active
|
|
||||||
auto toKill = m_doing.toList();
|
|
||||||
for(auto index: toKill)
|
|
||||||
{
|
|
||||||
auto part = downloads[index];
|
|
||||||
fullyAborted &= part->abort();
|
|
||||||
}
|
|
||||||
return fullyAborted;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NetJob::addNetAction(NetAction::Ptr action)
|
|
||||||
{
|
|
||||||
action->m_index_within_job = downloads.size();
|
|
||||||
downloads.append(action);
|
|
||||||
part_info pi;
|
|
||||||
parts_progress.append(pi);
|
|
||||||
partProgress(parts_progress.count() - 1, action->currentProgress(), action->totalProgress());
|
|
||||||
|
|
||||||
if(action->isRunning())
|
|
||||||
{
|
|
||||||
connect(action.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
|
|
||||||
connect(action.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
|
|
||||||
connect(action.get(), SIGNAL(netActionProgress(int, qint64, qint64)), SLOT(partProgress(int, qint64, qint64)));
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
m_todo.append(parts_progress.size() - 1);
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
NetJob::~NetJob() = default;
|
|
||||||
|
@ -1,88 +1,97 @@
|
|||||||
/* Copyright 2013-2021 MultiMC Contributors
|
// SPDX-License-Identifier: GPL-3.0-only
|
||||||
|
/*
|
||||||
|
* PolyMC - Minecraft Launcher
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* This program is free software: you can redistribute it and/or modify
|
||||||
* you may not use this file except in compliance with the License.
|
* it under the terms of the GNU General Public License as published by
|
||||||
* You may obtain a copy of the License at
|
* the Free Software Foundation, version 3.
|
||||||
*
|
*
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU General Public License for more details.
|
||||||
*
|
*
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
* You should have received a copy of the GNU General Public License
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
* along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
*
|
||||||
* See the License for the specific language governing permissions and
|
* This file incorporates work covered by the following copyright and
|
||||||
* limitations under the License.
|
* permission notice:
|
||||||
|
*
|
||||||
|
* Copyright 2013-2021 MultiMC Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <QtNetwork>
|
#include <QtNetwork>
|
||||||
|
|
||||||
|
#include <QObject>
|
||||||
#include "NetAction.h"
|
#include "NetAction.h"
|
||||||
#include "Download.h"
|
|
||||||
#include "HttpMetaCache.h"
|
|
||||||
#include "tasks/Task.h"
|
#include "tasks/Task.h"
|
||||||
#include "QObjectPtr.h"
|
|
||||||
|
|
||||||
class NetJob;
|
// Those are included so that they are also included by anyone using NetJob
|
||||||
|
#include "net/Download.h"
|
||||||
|
#include "net/HttpMetaCache.h"
|
||||||
|
|
||||||
class NetJob : public Task
|
class NetJob : public Task {
|
||||||
{
|
|
||||||
Q_OBJECT
|
Q_OBJECT
|
||||||
public:
|
|
||||||
|
public:
|
||||||
using Ptr = shared_qobject_ptr<NetJob>;
|
using Ptr = shared_qobject_ptr<NetJob>;
|
||||||
|
|
||||||
explicit NetJob(QString job_name, shared_qobject_ptr<QNetworkAccessManager> network) : Task(), m_network(network)
|
explicit NetJob(QString job_name, shared_qobject_ptr<QNetworkAccessManager> network) : Task(), m_network(network)
|
||||||
{
|
{
|
||||||
setObjectName(job_name);
|
setObjectName(job_name);
|
||||||
}
|
}
|
||||||
virtual ~NetJob();
|
virtual ~NetJob() = default;
|
||||||
|
|
||||||
bool addNetAction(NetAction::Ptr action);
|
void executeTask() override;
|
||||||
|
|
||||||
NetAction::Ptr operator[](int index)
|
auto canAbort() const -> bool override;
|
||||||
{
|
|
||||||
return downloads[index];
|
|
||||||
}
|
|
||||||
const NetAction::Ptr at(const int index)
|
|
||||||
{
|
|
||||||
return downloads.at(index);
|
|
||||||
}
|
|
||||||
NetAction::Ptr first()
|
|
||||||
{
|
|
||||||
if (downloads.size())
|
|
||||||
return downloads[0];
|
|
||||||
return NetAction::Ptr();
|
|
||||||
}
|
|
||||||
int size() const
|
|
||||||
{
|
|
||||||
return downloads.size();
|
|
||||||
}
|
|
||||||
QStringList getFailedFiles();
|
|
||||||
|
|
||||||
bool canAbort() const override;
|
auto addNetAction(NetAction::Ptr action) -> bool;
|
||||||
|
|
||||||
private slots:
|
auto operator[](int index) -> NetAction::Ptr { return m_downloads[index]; }
|
||||||
|
auto at(int index) -> const NetAction::Ptr { return m_downloads.at(index); }
|
||||||
|
auto size() const -> int { return m_downloads.size(); }
|
||||||
|
auto first() -> NetAction::Ptr { return m_downloads.size() != 0 ? m_downloads[0] : NetAction::Ptr{}; }
|
||||||
|
|
||||||
|
auto getFailedFiles() -> QStringList;
|
||||||
|
|
||||||
|
public slots:
|
||||||
|
// Qt can't handle auto at the start for some reason?
|
||||||
|
bool abort() override;
|
||||||
|
|
||||||
|
private slots:
|
||||||
void startMoreParts();
|
void startMoreParts();
|
||||||
|
|
||||||
public slots:
|
|
||||||
virtual void executeTask() override;
|
|
||||||
virtual bool abort() override;
|
|
||||||
|
|
||||||
private slots:
|
|
||||||
void partProgress(int index, qint64 bytesReceived, qint64 bytesTotal);
|
void partProgress(int index, qint64 bytesReceived, qint64 bytesTotal);
|
||||||
void partSucceeded(int index);
|
void partSucceeded(int index);
|
||||||
void partFailed(int index);
|
void partFailed(int index);
|
||||||
void partAborted(int index);
|
void partAborted(int index);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
shared_qobject_ptr<QNetworkAccessManager> m_network;
|
shared_qobject_ptr<QNetworkAccessManager> m_network;
|
||||||
|
|
||||||
struct part_info
|
struct part_info {
|
||||||
{
|
|
||||||
qint64 current_progress = 0;
|
qint64 current_progress = 0;
|
||||||
qint64 total_progress = 1;
|
qint64 total_progress = 1;
|
||||||
int failures = 0;
|
int failures = 0;
|
||||||
};
|
};
|
||||||
QList<NetAction::Ptr> downloads;
|
|
||||||
QList<part_info> parts_progress;
|
QList<NetAction::Ptr> m_downloads;
|
||||||
|
QList<part_info> m_parts_progress;
|
||||||
QQueue<int> m_todo;
|
QQueue<int> m_todo;
|
||||||
QSet<int> m_doing;
|
QSet<int> m_doing;
|
||||||
QSet<int> m_done;
|
QSet<int> m_done;
|
||||||
|
@ -5,33 +5,30 @@
|
|||||||
#include "Validator.h"
|
#include "Validator.h"
|
||||||
|
|
||||||
namespace Net {
|
namespace Net {
|
||||||
class Sink
|
class Sink {
|
||||||
{
|
public:
|
||||||
public: /* con/des */
|
Sink() = default;
|
||||||
Sink() {};
|
virtual ~Sink(){};
|
||||||
virtual ~Sink() {};
|
|
||||||
|
|
||||||
public: /* methods */
|
public:
|
||||||
virtual JobStatus init(QNetworkRequest & request) = 0;
|
virtual Task::State init(QNetworkRequest& request) = 0;
|
||||||
virtual JobStatus write(QByteArray & data) = 0;
|
virtual Task::State write(QByteArray& data) = 0;
|
||||||
virtual JobStatus abort() = 0;
|
virtual Task::State abort() = 0;
|
||||||
virtual JobStatus finalize(QNetworkReply & reply) = 0;
|
virtual Task::State finalize(QNetworkReply& reply) = 0;
|
||||||
virtual bool hasLocalData() = 0;
|
virtual bool hasLocalData() = 0;
|
||||||
|
|
||||||
void addValidator(Validator * validator)
|
void addValidator(Validator* validator)
|
||||||
{
|
{
|
||||||
if(validator)
|
if (validator) {
|
||||||
{
|
|
||||||
validators.push_back(std::shared_ptr<Validator>(validator));
|
validators.push_back(std::shared_ptr<Validator>(validator));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
protected: /* methods */
|
protected: /* methods */
|
||||||
bool finalizeAllValidators(QNetworkReply & reply)
|
bool finalizeAllValidators(QNetworkReply& reply)
|
||||||
{
|
{
|
||||||
for(auto & validator: validators)
|
for (auto& validator : validators) {
|
||||||
{
|
if (!validator->validate(reply))
|
||||||
if(!validator->validate(reply))
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
@ -39,32 +36,29 @@ protected: /* methods */
|
|||||||
bool failAllValidators()
|
bool failAllValidators()
|
||||||
{
|
{
|
||||||
bool success = true;
|
bool success = true;
|
||||||
for(auto & validator: validators)
|
for (auto& validator : validators) {
|
||||||
{
|
|
||||||
success &= validator->abort();
|
success &= validator->abort();
|
||||||
}
|
}
|
||||||
return success;
|
return success;
|
||||||
}
|
}
|
||||||
bool initAllValidators(QNetworkRequest & request)
|
bool initAllValidators(QNetworkRequest& request)
|
||||||
{
|
{
|
||||||
for(auto & validator: validators)
|
for (auto& validator : validators) {
|
||||||
{
|
if (!validator->init(request))
|
||||||
if(!validator->init(request))
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
bool writeAllValidators(QByteArray & data)
|
bool writeAllValidators(QByteArray& data)
|
||||||
{
|
{
|
||||||
for(auto & validator: validators)
|
for (auto& validator : validators) {
|
||||||
{
|
if (!validator->write(data))
|
||||||
if(!validator->write(data))
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected: /* data */
|
protected: /* data */
|
||||||
std::vector<std::shared_ptr<Validator>> validators;
|
std::vector<std::shared_ptr<Validator>> validators;
|
||||||
};
|
};
|
||||||
}
|
} // namespace Net
|
||||||
|
@@ -13,12 +13,12 @@
 ImgurAlbumCreation::ImgurAlbumCreation(QList<ScreenShot::Ptr> screenshots) : NetAction(), m_screenshots(screenshots)
 {
     m_url = BuildConfig.IMGUR_BASE_URL + "album.json";
-    m_status = Job_NotStarted;
+    m_state = State::Inactive;
 }
 
-void ImgurAlbumCreation::startImpl()
+void ImgurAlbumCreation::executeTask()
 {
-    m_status = Job_InProgress;
+    m_state = State::Running;
     QNetworkRequest request(m_url);
     request.setHeader(QNetworkRequest::UserAgentHeader, BuildConfig.USER_AGENT_UNCACHED);
     request.setHeader(QNetworkRequest::ContentTypeHeader, "application/x-www-form-urlencoded");
@@ -43,11 +43,11 @@ void ImgurAlbumCreation::startImpl()
 void ImgurAlbumCreation::downloadError(QNetworkReply::NetworkError error)
 {
     qDebug() << m_reply->errorString();
-    m_status = Job_Failed;
+    m_state = State::Failed;
 }
 void ImgurAlbumCreation::downloadFinished()
 {
-    if (m_status != Job_Failed)
+    if (m_state != State::Failed)
     {
         QByteArray data = m_reply->readAll();
         m_reply.reset();
@@ -68,7 +68,7 @@ void ImgurAlbumCreation::downloadFinished()
         }
         m_deleteHash = object.value("data").toObject().value("deletehash").toString();
         m_id = object.value("data").toObject().value("id").toString();
-        m_status = Job_Finished;
+        m_state = State::Succeeded;
         emit succeeded(m_index_within_job);
         return;
     }
@@ -82,7 +82,6 @@ void ImgurAlbumCreation::downloadFinished()
 }
 void ImgurAlbumCreation::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
 {
-    m_total_progress = bytesTotal;
-    m_progress = bytesReceived;
+    setProgress(bytesReceived, bytesTotal);
     emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
 }
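The progress handlers above now funnel through Task::setProgress() instead of writing the two progress members by hand, which is the de-duplication the commit message refers to. A hedged sketch of what such a setter plausibly looks like follows; the member and signal names are assumptions, not taken from this commit (only the declaration "void setProgress(qint64 current, qint64 total);" appears in the Task.h hunk further down).

// Plausible shape of Task::setProgress(); member and signal names are assumed.
void Task::setProgress(qint64 current, qint64 total)
{
    m_progress = current;           // assumed member name
    m_progressTotal = total;        // assumed member name
    emit progress(current, total);  // assumed signal
}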
@@ -24,16 +24,14 @@ public:
 
 protected
 slots:
-    virtual void downloadProgress(qint64 bytesReceived, qint64 bytesTotal);
-    virtual void downloadError(QNetworkReply::NetworkError error);
-    virtual void downloadFinished();
-    virtual void downloadReadyRead()
-    {
-    }
+    void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) override;
+    void downloadError(QNetworkReply::NetworkError error) override;
+    void downloadFinished() override;
+    void downloadReadyRead() override {}
 
 public
 slots:
-    virtual void startImpl();
+    void executeTask() override;
 
 private:
     QList<ScreenShot::Ptr> m_screenshots;
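The header change above is the pattern every NetAction subclass follows after this refactor: executeTask() replaces startImpl(), and the download handlers become plain overrides. A sketch of that shape, with an invented class name, might read:

// Hypothetical subclass header; "ExampleAction" does not exist in the codebase, the
// slot layout simply mirrors the ImgurAlbumCreation.h hunk above.
class ExampleAction : public NetAction {
    Q_OBJECT

protected slots:
    void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) override;
    void downloadError(QNetworkReply::NetworkError error) override;
    void downloadFinished() override;
    void downloadReadyRead() override {}

public slots:
    void executeTask() override;
};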
@@ -13,13 +13,13 @@
 ImgurUpload::ImgurUpload(ScreenShot::Ptr shot) : NetAction(), m_shot(shot)
 {
     m_url = BuildConfig.IMGUR_BASE_URL + "upload.json";
-    m_status = Job_NotStarted;
+    m_state = State::Inactive;
 }
 
-void ImgurUpload::startImpl()
+void ImgurUpload::executeTask()
 {
     finished = false;
-    m_status = Job_InProgress;
+    m_state = Task::State::Running;
     QNetworkRequest request(m_url);
     request.setHeader(QNetworkRequest::UserAgentHeader, BuildConfig.USER_AGENT_UNCACHED);
     request.setRawHeader("Authorization", QString("Client-ID %1").arg(BuildConfig.IMGUR_CLIENT_ID).toStdString().c_str());
@@ -63,7 +63,7 @@ void ImgurUpload::downloadError(QNetworkReply::NetworkError error)
         qCritical() << "Double finished ImgurUpload!";
         return;
     }
-    m_status = Job_Failed;
+    m_state = Task::State::Failed;
     finished = true;
     m_reply.reset();
     emit failed(m_index_within_job);
@@ -99,14 +99,13 @@ void ImgurUpload::downloadFinished()
     m_shot->m_imgurId = object.value("data").toObject().value("id").toString();
     m_shot->m_url = object.value("data").toObject().value("link").toString();
     m_shot->m_imgurDeleteHash = object.value("data").toObject().value("deletehash").toString();
-    m_status = Job_Finished;
+    m_state = Task::State::Succeeded;
     finished = true;
     emit succeeded(m_index_within_job);
     return;
 }
 void ImgurUpload::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
 {
-    m_total_progress = bytesTotal;
-    m_progress = bytesReceived;
+    setProgress(bytesReceived, bytesTotal);
     emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
 }
@@ -21,7 +21,7 @@ slots:
 
 public
 slots:
-    void startImpl() override;
+    void executeTask() override;
 
 private:
     ScreenShot::Ptr m_shot;
@@ -52,6 +52,8 @@ class Task : public QObject {
 
     virtual bool canAbort() const { return false; }
 
+    auto getState() const -> State { return m_state; }
+
     QString getStatus() { return m_status; }
     virtual auto getStepStatus() const -> QString { return m_status; }
 
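The new accessor gives callers a read-only view of the task lifecycle. A hypothetical caller-side use, with an invented helper name and an assumed include path, could be:

#include "tasks/Task.h"  // include path assumed

// Invented helper, shown only to illustrate the read-only accessor added above.
bool isStillRunning(const Task* task)
{
    return task->getState() == Task::State::Running;
}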
@@ -90,7 +92,7 @@ class Task : public QObject {
     void setStatus(const QString& status);
     void setProgress(qint64 current, qint64 total);
 
-private:
+protected:
     State m_state = State::Inactive;
     QStringList m_Warnings;
     QString m_failReason = "";
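Moving these fields from private to protected is what lets NetAction subclasses such as the Imgur actions above assign m_state directly. A sketch of that access pattern, with an invented class name, looks like:

// "ExampleAction" is hypothetical; the body mirrors ImgurUpload::downloadError above.
void ExampleAction::downloadError(QNetworkReply::NetworkError)
{
    m_state = Task::State::Failed;    // legal now that m_state is protected
    emit failed(m_index_within_job);  // failed()/m_index_within_job come from NetAction
}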
@@ -667,7 +667,7 @@ void TranslationsModel::downloadTranslation(QString key)
     auto dl = Net::Download::makeCached(QUrl(BuildConfig.TRANSLATIONS_BASE_URL + lang->file_name), entry);
     auto rawHash = QByteArray::fromHex(lang->file_sha1.toLatin1());
     dl->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
-    dl->m_total_progress = lang->file_size;
+    dl->setProgress(dl->getProgress(), lang->file_size);
 
     d->m_dl_job = new NetJob("Translation for " + key, APPLICATION->network());
     d->m_dl_job->addNetAction(dl);
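Seeding only the expected total through setProgress() (while keeping the current value from getProgress()) is the same idiom used for the asset downloads earlier in this commit; it gives progress bars a denominator before the first downloadProgress() callback arrives. A generic sketch with placeholder names:

// Placeholders: 'url', 'entry', 'rawHash', 'expectedSize' and 'job' stand in for real values.
auto dl = Net::Download::makeCached(url, entry);
dl->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
dl->setProgress(dl->getProgress(), expectedSize);  // keep current progress, seed the total
job->addNetAction(dl);                             // 'job' is a NetJob placeholder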