Fixes 4344 - Duplicate Uploads (#4442)

* Fixes 4344
- Update the WorkManager existing-work policy to ExistingWorkPolicy.APPEND_OR_REPLACE, which appends the new work request to the end of the existing chain (a minimal sketch of the new scheduling call follows below). This removes the while loop in UploadWorker that was meant to handle retries spawning a new worker while one was already running; that loop appeared to have a race condition that uploaded duplicate entries.

* Update contribution states to IN_PROGRESS before the uploads are processed, so that another concurrently running worker does not pick up the same contributions again
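For illustration, a minimal Kotlin sketch of the scheduling behaviour the first change relies on, assuming androidx.work 2.4+ (where APPEND_OR_REPLACE is available); the no-op UploadWorker below is only a stand-in so the snippet is self-contained, not the app's real worker:

import android.content.Context
import androidx.work.CoroutineWorker
import androidx.work.ExistingWorkPolicy
import androidx.work.OneTimeWorkRequest
import androidx.work.WorkManager
import androidx.work.WorkerParameters

// Illustrative stand-in; the real UploadWorker drains the queued contributions
// as shown in the UploadWorker.kt diff further down.
class UploadWorker(appContext: Context, params: WorkerParameters) :
    CoroutineWorker(appContext, params) {
    override suspend fun doWork(): Result = Result.success()
}

fun makeUploadRequest(context: Context) {
    // With APPEND_OR_REPLACE, a request enqueued while upload work is still pending
    // is chained after the existing work (or replaces a failed/cancelled chain);
    // with KEEP it would simply be dropped. Retries therefore no longer need the
    // while loop inside the worker, and each worker run handles one batch.
    WorkManager.getInstance(context).enqueueUniqueWork(
        UploadWorker::class.java.simpleName,
        ExistingWorkPolicy.APPEND_OR_REPLACE,
        OneTimeWorkRequest.from(UploadWorker::class.java)
    )
}

The unique-work name keeps at most one upload chain alive; appending instead of keeping is what prevents a retry request from being silently ignored while an upload is still running.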
Ashish authored on 2021-06-14 17:27:15 +05:30, committed by GitHub
parent 09cacde670
commit 10ed6678b3
3 changed files with 59 additions and 56 deletions

MainActivity.java

@@ -318,7 +318,7 @@ public class MainActivity extends BaseActivity
        } else {
            WorkManager.getInstance(getApplicationContext()).enqueueUniqueWork(
                UploadWorker.class.getSimpleName(),
-                ExistingWorkPolicy.KEEP, OneTimeWorkRequest.from(UploadWorker.class));
+                ExistingWorkPolicy.APPEND_OR_REPLACE, OneTimeWorkRequest.from(UploadWorker.class));
            viewUtilWrapper
                .showShortToast(getBaseContext(), getString(R.string.limited_connection_disabled));

UploadActivity.java

@@ -289,7 +289,7 @@ public class UploadActivity extends BaseActivity implements UploadContract.View,
    public void makeUploadRequest() {
        WorkManager.getInstance(getApplicationContext()).enqueueUniqueWork(
            UploadWorker.class.getSimpleName(),
-            ExistingWorkPolicy.KEEP, OneTimeWorkRequest.from(UploadWorker.class));
+            ExistingWorkPolicy.APPEND_OR_REPLACE, OneTimeWorkRequest.from(UploadWorker.class));
    }

    @Override

UploadWorker.kt

@@ -7,6 +7,7 @@ import androidx.core.app.NotificationCompat
import androidx.core.app.NotificationManagerCompat
import androidx.work.CoroutineWorker
import androidx.work.WorkerParameters
+import com.google.gson.Gson
import com.mapbox.mapboxsdk.plugins.localization.BuildConfig
import dagger.android.ContributesAndroidInjector
import fr.free.nrw.commons.CommonsApplication
@@ -146,61 +147,60 @@ class UploadWorker(var appContext: Context, workerParams: WorkerParameters) :
            CommonsApplication.NOTIFICATION_CHANNEL_ID_ALL
        )!!
        withContext(Dispatchers.IO) {
-            //Doing this so that retry requests do not create new work requests and while a work is
-            // already running, all the requests should go through this, so kind of a queue
-            while (contributionDao.getContribution(statesToProcess)
-                    .blockingGet().isNotEmpty()
-            ) {
-                val queuedContributions = contributionDao.getContribution(statesToProcess)
-                    .blockingGet()
-                //Showing initial notification for the number of uploads being processed
-                processingUploads.setContentTitle(appContext.getString(R.string.starting_uploads))
-                processingUploads.setContentText(
-                    appContext.resources.getQuantityString(
-                        R.plurals.starting_multiple_uploads,
-                        queuedContributions.size,
-                        queuedContributions.size
-                    )
-                )
-                notificationManager?.notify(
-                    PROCESSING_UPLOADS_NOTIFICATION_TAG,
-                    PROCESSING_UPLOADS_NOTIFICATION_ID,
-                    processingUploads.build()
-                )
-                queuedContributions.asFlow().map { contribution ->
-                    /**
-                     * If the limited connection mode is on, lets iterate through the queued
-                     * contributions
-                     * and set the state as STATE_QUEUED_LIMITED_CONNECTION_MODE ,
-                     * otherwise proceed with the upload
-                     */
-                    if(isLimitedConnectionModeEnabled()){
-                        if (contribution.state == Contribution.STATE_QUEUED) {
-                            contribution.state = Contribution.STATE_QUEUED_LIMITED_CONNECTION_MODE
-                            contributionDao.save(contribution)
-                        }
-                    } else {
-                        contribution.transferred = 0
-                        contribution.state = Contribution.STATE_IN_PROGRESS
-                        contributionDao.save(contribution)
-                        uploadContribution(contribution = contribution)
-                    }
-                }.collect()
-                //Dismiss the global notification
-                notificationManager?.cancel(
-                    PROCESSING_UPLOADS_NOTIFICATION_TAG,
-                    PROCESSING_UPLOADS_NOTIFICATION_ID
-                )
-                //No need to keep looking if the limited connection mode is on,
-                //If the user toggles it, the work manager will be started again
-                if(isLimitedConnectionModeEnabled()){
-                    break;
-                }
-            }
+            val queuedContributions = contributionDao.getContribution(statesToProcess)
+                .blockingGet()
+            //Showing initial notification for the number of uploads being processed
+            Timber.e("Queued Contributions: "+ queuedContributions.size)
+            processingUploads.setContentTitle(appContext.getString(R.string.starting_uploads))
+            processingUploads.setContentText(
+                appContext.resources.getQuantityString(
+                    R.plurals.starting_multiple_uploads,
+                    queuedContributions.size,
+                    queuedContributions.size
+                )
+            )
+            notificationManager?.notify(
+                PROCESSING_UPLOADS_NOTIFICATION_TAG,
+                PROCESSING_UPLOADS_NOTIFICATION_ID,
+                processingUploads.build()
+            )
+            /**
+             * To avoid race condition when multiple of these workers are working, assign this state
+             * so that the next one does not process these contribution again
+             */
+            queuedContributions.forEach {
+                it.state=Contribution.STATE_IN_PROGRESS
+                contributionDao.saveSynchronous(it)
+            }
+            queuedContributions.asFlow().map { contribution ->
+                /**
+                 * If the limited connection mode is on, lets iterate through the queued
+                 * contributions
+                 * and set the state as STATE_QUEUED_LIMITED_CONNECTION_MODE ,
+                 * otherwise proceed with the upload
+                 */
+                if (isLimitedConnectionModeEnabled()) {
+                    if (contribution.state == Contribution.STATE_QUEUED) {
+                        contribution.state = Contribution.STATE_QUEUED_LIMITED_CONNECTION_MODE
+                        contributionDao.saveSynchronous(contribution)
+                    }
+                } else {
+                    contribution.transferred = 0
+                    contribution.state = Contribution.STATE_IN_PROGRESS
+                    contributionDao.saveSynchronous(contribution)
+                    uploadContribution(contribution = contribution)
+                }
+            }.collect()
+            //Dismiss the global notification
+            notificationManager?.cancel(
+                PROCESSING_UPLOADS_NOTIFICATION_TAG,
+                PROCESSING_UPLOADS_NOTIFICATION_ID
+            )
        }
        //TODO make this smart, think of handling retries in the future
        return Result.success()
@@ -307,6 +307,7 @@ class UploadWorker(var appContext: Context, workerParams: WorkerParameters) :
                    Timber.e(exception)
                    Timber.e("Upload from stash failed for contribution : $filename")
                    showFailedNotification(contribution)
+                    contribution.state=Contribution.STATE_FAILED
                    if (STASH_ERROR_CODES.contains(exception.message)) {
                        clearChunks(contribution)
                    }
@@ -315,26 +316,28 @@ class UploadWorker(var appContext: Context, workerParams: WorkerParameters) :
                StashUploadState.PAUSED -> {
                    showPausedNotification(contribution)
                    contribution.state = Contribution.STATE_PAUSED
-                    contributionDao.save(contribution).blockingGet()
+                    contributionDao.saveSynchronous(contribution)
                }
                else -> {
                    Timber.e("""upload file to stash failed with status: ${stashUploadResult.state}""")
                    showFailedNotification(contribution)
                    contribution.state = Contribution.STATE_FAILED
                    contribution.chunkInfo = null
-                    contributionDao.save(contribution).blockingAwait()
+                    contributionDao.saveSynchronous(contribution)
                }
            }
        }catch (exception: Exception){
            Timber.e(exception)
            Timber.e("Stash upload failed for contribution: $filename")
            showFailedNotification(contribution)
+            contribution.state=Contribution.STATE_FAILED
            clearChunks(contribution)
        }
    }

    private fun clearChunks(contribution: Contribution) {
        contribution.chunkInfo=null
-        contributionDao.save(contribution).blockingAwait()
+        contributionDao.saveSynchronous(contribution)
    }
/**