Mirror of https://github.com/commons-app/apps-android-commons.git (synced 2025-10-26 20:33:53 +01:00)

Implemented flag for workers

This commit is contained in:
parent 5b57c25110
commit 7255c994a6

13 changed files with 153 additions and 204 deletions

@@ -142,11 +142,6 @@ public class CommonsApplication extends MultiDexApplication {
    @Inject
    ContributionDao contributionDao;

    /**
     * In-memory list of contributions whose uploads have been paused by the user
     */
    public static Map<String, Boolean> pauseUploads = new HashMap<>();

    /**
     * Used to declare and initialize various components and dependencies
     */

@@ -101,7 +101,6 @@ data class Contribution constructor(
        const val STATE_QUEUED = 2
        const val STATE_IN_PROGRESS = 3
        const val STATE_PAUSED = 4
        const val STATE_QUEUED_LIMITED_CONNECTION_MODE=5

        /**
         * Formatting captions to the Wikibase format for sending labels

@@ -129,18 +128,6 @@ data class Contribution constructor(
        return chunkInfo != null && chunkInfo!!.totalChunks == chunkInfo!!.indexOfNextChunkToUpload
    }

    fun isPaused(): Boolean {
        return CommonsApplication.pauseUploads[pageId] ?: false
    }

    fun unpause() {
        CommonsApplication.pauseUploads[pageId] = false
    }

    fun dateModifiedInMillis(): Long {
        return dateModified!!.time
    }

    fun dateUploadStartedInMillis(): Long {
        return dateUploadStarted!!.time
    }

@@ -13,6 +13,7 @@ import io.reactivex.Completable;
import io.reactivex.Single;
import java.util.Calendar;
import java.util.List;
import timber.log.Timber;

@Dao
public abstract class ContributionDao {

@@ -86,6 +87,9 @@ public abstract class ContributionDao {
    @Update
    public abstract void updateSynchronous(Contribution contribution);

    @Query("UPDATE contribution SET state = :newState WHERE state IN (:states)")
    public abstract void updateContributionsState(List<Integer> states, int newState);

    public Completable update(final Contribution contribution) {
        return Completable
            .fromAction(() -> {

@@ -93,4 +97,11 @@ public abstract class ContributionDao {
                updateSynchronous(contribution);
            });
    }

    public Completable updateContributionsWithStates(List<Integer> states, int newState) {
        return Completable
            .fromAction(() -> {
                updateContributionsState(states, newState);
            });
    }
}
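
The new @Query is the database half of this change: instead of loading contributions and saving them one by one, callers can move every row whose state is in a given set to a new state with a single SQL statement. A minimal Kotlin sketch of the same idea (the project's DAO above is Java; only the table, column, and method names are taken from the hunk, the rest is illustrative):

import androidx.room.Dao
import androidx.room.Query
import io.reactivex.Completable

// Sketch only: a Room DAO with the bulk state update shown in the hunk above.
@Dao
abstract class ContributionStateDaoSketch {
    // Moves every contribution whose state is in `states` to `newState` in one statement.
    @Query("UPDATE contribution SET state = :newState WHERE state IN (:states)")
    abstract fun updateContributionsState(states: List<Int>, newState: Int)

    // RxJava wrapper, mirroring updateContributionsWithStates(...) in the Java DAO.
    fun updateContributionsWithStates(states: List<Int>, newState: Int): Completable =
        Completable.fromAction { updateContributionsState(states, newState) }
}

Pausing everything that is queued or in progress then becomes a single call, e.g. updateContributionsWithStates(listOf(STATE_QUEUED, STATE_IN_PROGRESS), STATE_PAUSED), which is exactly how PendingUploadsFragment uses it further down.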

@@ -809,8 +809,7 @@ public class ContributionsFragment
     */
    public void retryUpload(Contribution contribution) {
        if (NetworkUtils.isInternetConnectionEstablished(getContext())) {
            if (contribution.getState() == STATE_PAUSED
                || contribution.getState() == Contribution.STATE_QUEUED_LIMITED_CONNECTION_MODE) {
            if (contribution.getState() == STATE_PAUSED) {
                restartUpload(contribution);
            } else if (contribution.getState() == STATE_FAILED) {
                int retries = contribution.getRetries();

@@ -105,4 +105,8 @@ class ContributionsLocalDataSource {
    public Completable updateContribution(final Contribution contribution) {
        return contributionDao.update(contribution);
    }

    public Completable updateContributionsWithStates(List<Integer> states, int newState) {
        return contributionDao.updateContributionsWithStates(states, newState);
    }
}

@@ -76,4 +76,8 @@ public class ContributionsRepository {
    public Completable updateContribution(Contribution contribution) {
        return localDataSource.updateContribution(contribution);
    }

    public Completable updateContributionWithStates(List<Integer> states, int newState) {
        return localDataSource.updateContributionsWithStates(states, newState);
    }
}
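
The repository and the local data source only delegate, so a bulk state change flows Fragment → Presenter → Repository → LocalDataSource → DAO → SQL. A hedged Kotlin usage sketch of a caller pausing everything through this chain (the method name, states, and ContributionsRepository come from the hunks; the scheduler and disposable here are assumptions — the real presenter uses an injected scheduler):

import io.reactivex.disposables.CompositeDisposable
import io.reactivex.schedulers.Schedulers

// Sketch only: how a presenter-level caller might use the new repository method.
fun pauseAllPendingUploads(
    repository: ContributionsRepository,       // real class from the hunk above
    compositeDisposable: CompositeDisposable   // assumed to exist in the caller
) {
    compositeDisposable.add(
        repository.updateContributionWithStates(
            listOf(Contribution.STATE_QUEUED, Contribution.STATE_IN_PROGRESS),
            Contribution.STATE_PAUSED
        )
            .subscribeOn(Schedulers.io())
            .subscribe()
    )
}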

@@ -197,8 +197,7 @@ class FailedUploadsFragment : CommonsDaggerSupportFragment(), PendingUploadsCont
                ViewUtil.showShortToast(context, R.string.cancelling_upload)
                uploadProgressActivity.hidePendingIcons()
                pendingUploadsPresenter.deleteUploads(
                    listOf(Contribution.STATE_FAILED),
                    this.requireContext().applicationContext
                    listOf(Contribution.STATE_FAILED)
                )
            },
            {}

@@ -42,14 +42,6 @@ class PendingUploadsFragment : CommonsDaggerSupportFragment(), PendingUploadsCont
    @Inject
    lateinit var pendingUploadsPresenter: PendingUploadsPresenter

    @Inject
    lateinit var mediaClient: MediaClient

    @Inject
    lateinit var sessionManager: SessionManager

    private var userName: String? = null

    private lateinit var binding: FragmentPendingUploadsBinding

    private lateinit var uploadProgressActivity: UploadProgressActivity

@@ -65,16 +57,6 @@ class PendingUploadsFragment : CommonsDaggerSupportFragment(), PendingUploadsCont
            param1 = it.getString(ARG_PARAM1)
            param2 = it.getString(ARG_PARAM2)
        }

        //Now that we are allowing this fragment to be started for
        // any userName- we expect it to be passed as an argument
        if (arguments != null) {
            userName = requireArguments().getString(ProfileActivity.KEY_USERNAME)
        }

        if (StringUtils.isEmpty(userName)) {
            userName = sessionManager!!.getUserName()
        }
    }

    override fun onAttach(context: Context) {

@@ -193,39 +175,39 @@ class PendingUploadsFragment : CommonsDaggerSupportFragment(), PendingUploadsCont
    }

    fun pauseUploads() {
        if (contributionsList != null) {
            pendingUploadsPresenter.pauseUploads(
                contributionsList,
                0,
                this.requireContext().applicationContext
            )
        }
        pendingUploadsPresenter.pauseUploads(
            listOf(Contribution.STATE_QUEUED, Contribution.STATE_IN_PROGRESS),
            Contribution.STATE_PAUSED
        )

    }

    fun deleteUploads() {
        if (contributionsList != null) {
            showAlertDialog(
                requireActivity(),
                String.format(
                    Locale.getDefault(),
                    getString(R.string.cancelling_all_the_uploads)
                ),
                String.format(
                    Locale.getDefault(),
                    getString(R.string.are_you_sure_that_you_want_cancel_all_the_uploads)
                ),
                String.format(Locale.getDefault(), getString(R.string.yes)),
                String.format(Locale.getDefault(), getString(R.string.no)),
                {
                    ViewUtil.showShortToast(context, R.string.cancelling_upload)
                    uploadProgressActivity.hidePendingIcons()
                    pendingUploadsPresenter.deleteUploads(
                        listOf(Contribution.STATE_QUEUED, Contribution.STATE_IN_PROGRESS, Contribution.STATE_PAUSED),
                        this.requireContext().applicationContext
        showAlertDialog(
            requireActivity(),
            String.format(
                Locale.getDefault(),
                getString(R.string.cancelling_all_the_uploads)
            ),
            String.format(
                Locale.getDefault(),
                getString(R.string.are_you_sure_that_you_want_cancel_all_the_uploads)
            ),
            String.format(Locale.getDefault(), getString(R.string.yes)),
            String.format(Locale.getDefault(), getString(R.string.no)),
            {
                ViewUtil.showShortToast(context, R.string.cancelling_upload)
                uploadProgressActivity.hidePendingIcons()
                pendingUploadsPresenter.deleteUploads(
                    listOf(
                        Contribution.STATE_QUEUED,
                        Contribution.STATE_IN_PROGRESS,
                        Contribution.STATE_PAUSED
                    )
                },
                {}
            )
        }
            )
        },
        {}
    )

    }
    }
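
Because removed and added lines are interleaved in the hunk above, here is the new shape of the two fragment methods assembled for readability. This is a sketch based only on the added lines (the old versions walked contributionsList and passed a Context; the String.format wrappers around getString are abbreviated here):

// Sketch assembled from the added lines above; not a verbatim copy of the file.
fun pauseUploads() {
    // Persist the paused state for everything queued or in progress in one DB update.
    pendingUploadsPresenter.pauseUploads(
        listOf(Contribution.STATE_QUEUED, Contribution.STATE_IN_PROGRESS),
        Contribution.STATE_PAUSED
    )
}

fun deleteUploads() {
    showAlertDialog(
        requireActivity(),
        getString(R.string.cancelling_all_the_uploads),
        getString(R.string.are_you_sure_that_you_want_cancel_all_the_uploads),
        getString(R.string.yes),
        getString(R.string.no),
        {
            ViewUtil.showShortToast(context, R.string.cancelling_upload)
            uploadProgressActivity.hidePendingIcons()
            // Cancel queued, in-progress and paused uploads; no Context argument any more.
            pendingUploadsPresenter.deleteUploads(
                listOf(
                    Contribution.STATE_QUEUED,
                    Contribution.STATE_IN_PROGRESS,
                    Contribution.STATE_PAUSED
                )
            )
        },
        {}
    )
}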

@@ -101,49 +101,26 @@ public class PendingUploadsPresenter implements UserActionListener {
        contributionBoundaryCallback.dispose();
    }

    /**
     * Delete a failed contribution from the local db
     */
    @Override
    public void deleteUpload(final Contribution contribution, Context context) {
        compositeDisposable.add(repository
            .deleteContributionFromDB(contribution)
            .subscribeOn(ioThreadScheduler)
            .subscribe(() ->
                WorkRequestHelper.Companion.makeOneTimeWorkRequest(
                    context, ExistingWorkPolicy.KEEP)
            ));
            .subscribe());
    }

    public void pauseUploads(List<Contribution> contributionList, int index, Context context) {
        if (index >= contributionList.size()) {
            return;
        }
        Contribution it = contributionList.get(index);
        CommonsApplication.pauseUploads.put(it.getPageId().toString(), true);
        //Retain the paused state in DB
        it.setState(Contribution.STATE_PAUSED);
    public void pauseUploads(List<Integer> states, int newState) {
        compositeDisposable.add(repository
            .save(it)
            .updateContributionWithStates(states, newState)
            .subscribeOn(ioThreadScheduler)
            .doOnComplete(() -> {
                pauseUploads(contributionList, index + 1, context);
            }
            )
            .subscribe(() ->
                WorkRequestHelper.Companion.makeOneTimeWorkRequest(
                    context, ExistingWorkPolicy.KEEP)
            ));
            .subscribe());
    }

    public void deleteUploads(List<Integer> states, Context context) {
    public void deleteUploads(List<Integer> states) {
        compositeDisposable.add(repository
            .deleteContributionsFromDBWithStates(states)
            .subscribeOn(ioThreadScheduler)
            .subscribe(() ->
                WorkRequestHelper.Companion.makeOneTimeWorkRequest(
                    context, ExistingWorkPolicy.KEEP)
            ));
            .subscribe());
    }

    public void restartUploads(List<Contribution> contributionList, int index, Context context) {

@@ -163,7 +140,6 @@ public class PendingUploadsPresenter implements UserActionListener {
            .save(it)
            .subscribeOn(ioThreadScheduler)
            .doOnComplete(() -> {
                CommonsApplication.pauseUploads.put(it.getPageId().toString(), false);
                restartUploads(contributionList, index + 1, context);
            }
            )

@@ -202,18 +178,4 @@ public class PendingUploadsPresenter implements UserActionListener {
            ));
    }

    /**
     * Update the contribution's state in the databse, upon completion, trigger the workmanager to
     * process this contribution
     *
     * @param contribution
     */
    public void saveContribution(Contribution contribution, Context context) {
        compositeDisposable.add(repository
            .save(contribution)
            .subscribeOn(ioThreadScheduler)
            .subscribe(() -> WorkRequestHelper.Companion.makeOneTimeWorkRequest(
                context, ExistingWorkPolicy.KEEP)));
    }

}
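
The presenter hunks above also interleave old and new code. Assembled from the added lines, the presenter now writes states in bulk and no longer enqueues a work request itself. A Kotlin sketch of the apparent new methods (the real class is Java; repository, compositeDisposable, and ioThreadScheduler are the field names seen in the hunks):

import io.reactivex.Scheduler
import io.reactivex.disposables.CompositeDisposable

// Sketch of the presenter's new shape, assembled from the added lines; not verbatim.
class PendingUploadsPresenterSketch(
    private val repository: ContributionsRepository,
    private val ioThreadScheduler: Scheduler,
) {
    private val compositeDisposable = CompositeDisposable()

    fun deleteUpload(contribution: Contribution) {
        compositeDisposable.add(
            repository.deleteContributionFromDB(contribution)
                .subscribeOn(ioThreadScheduler)
                .subscribe()          // no longer triggers WorkRequestHelper here
        )
    }

    // One bulk UPDATE replaces the old per-contribution recursion over contributionList.
    fun pauseUploads(states: List<Int>, newState: Int) {
        compositeDisposable.add(
            repository.updateContributionWithStates(states, newState)
                .subscribeOn(ioThreadScheduler)
                .subscribe()
        )
    }

    fun deleteUploads(states: List<Int>) {
        compositeDisposable.add(
            repository.deleteContributionsFromDBWithStates(states)
                .subscribeOn(ioThreadScheduler)
                .subscribe()          // Context parameter and work-request trigger removed
        )
    }
}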

@@ -79,7 +79,7 @@ class UploadClient @Inject constructor(
        val errorMessage = AtomicReference<String>()
        compositeDisposable.add(
            Observable.fromIterable(fileChunks).forEach { chunkFile: File ->
                if (canProcess(contribution, failures)) {
                if (canProcess(contributionDao, contribution, failures)) {
                    if (contributionDao.getContribution(contribution.pageId) == null) {
                        compositeDisposable.clear()
                        return@forEach

@@ -106,8 +106,8 @@ class UploadClient @Inject constructor(
            contributionDao.getContribution(contribution.pageId) == null -> {
                return Observable.just(StashUploadResult(StashUploadState.CANCELLED, null, "Upload cancelled"))
            }
            contribution.isPaused() -> {
                Timber.d("Upload stash paused %s", contribution.pageId)
            contributionDao.getContribution(contribution.pageId).state == Contribution.STATE_PAUSED -> {
                Timber.tag("PRINT").d("Upload stash paused %s", contribution.pageId)
                Observable.just(StashUploadResult(StashUploadState.PAUSED, null, null))
            }
            failures.get() -> {

@@ -265,10 +265,16 @@ class UploadClient @Inject constructor(
        }
    }

    private fun canProcess(contribution: Contribution, failures: AtomicBoolean): Boolean {
    private fun canProcess(
        contributionDao: ContributionDao,
        contribution: Contribution,
        failures: AtomicBoolean
    ): Boolean {
        // As long as the contribution hasn't been paused and there are no errors,
        // we can process the current chunk.
        return !(contribution.isPaused() || failures.get())
        Timber.tag("PRINT").e("oyee" + contributionDao.getContribution(contribution.pageId).state)
        return !(contributionDao.getContribution(contribution.pageId).state == Contribution.STATE_PAUSED
            || failures.get())
    }

    private fun shouldSkip(
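
The upload client stops consulting the removed in-memory isPaused() flag and instead re-reads the contribution's persisted state before each chunk. A sketch of the new per-chunk gate (names follow the hunk above; it is assumed, per the hunk, that ContributionDao.getContribution(pageId) returns the row synchronously, or null once the contribution has been deleted — the null branch here is illustrative, since the real code checks for deletion separately):

import java.util.concurrent.atomic.AtomicBoolean

// Sketch of the new chunk-processing check in UploadClient; not a verbatim copy.
fun canProcessChunk(
    contributionDao: ContributionDao,
    contribution: Contribution,
    failures: AtomicBoolean,
): Boolean {
    // Look the row up again so a pause or delete done elsewhere is seen immediately,
    // instead of trusting the in-memory pauseUploads map that this commit removes.
    val current = contributionDao.getContribution(contribution.pageId)
        ?: return false                        // row deleted -> stop processing chunks
    return current.state != Contribution.STATE_PAUSED && !failures.get()
}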

@@ -109,7 +109,6 @@ class UploadWorker(var appContext: Context, workerParams: WorkerParameters) :
            getNotificationBuilder(CommonsApplication.NOTIFICATION_CHANNEL_ID_ALL)!!

        statesToProcess.add(Contribution.STATE_QUEUED)
        statesToProcess.add(Contribution.STATE_QUEUED_LIMITED_CONNECTION_MODE)
    }

    @dagger.Module

@@ -169,85 +168,81 @@ class UploadWorker(var appContext: Context, workerParams: WorkerParameters) :
    }

    override suspend fun doWork(): Result {
        var countUpload = 0
        // Start a foreground service
        setForeground(createForegroundInfo())
        notificationManager = NotificationManagerCompat.from(appContext)
        val processingUploads = getNotificationBuilder(
            CommonsApplication.NOTIFICATION_CHANNEL_ID_ALL
        )!!
        withContext(Dispatchers.IO) {
        try {
            var countUpload = 0
            // Start a foreground service
            setForeground(createForegroundInfo())
            notificationManager = NotificationManagerCompat.from(appContext)
            val processingUploads = getNotificationBuilder(
                CommonsApplication.NOTIFICATION_CHANNEL_ID_ALL
            )!!
            withContext(Dispatchers.IO) {
            while (contributionDao.getContribution(statesToProcess)
                .blockingGet().size > 0
            ) {
                /*
                queuedContributions receives the results from a one-shot query.
                This means that once the list has been fetched from the database,
                it does not get updated even if some changes (insertions, deletions, etc.)
                are made to the contribution table afterwards.

                //TODO: Implement Worker Flags
                /*
                queuedContributions receives the results from a one-shot query.
                This means that once the list has been fetched from the database,
                it does not get updated even if some changes (insertions, deletions, etc.)
                are made to the contribution table afterwards.

                Related issues (fixed):
                https://github.com/commons-app/apps-android-commons/issues/5136
                https://github.com/commons-app/apps-android-commons/issues/5346
                */
                while (contributionDao.getContribution(statesToProcess)
                    .blockingGet().size > 0
                ) {
                    val queuedContributions = contributionDao.getContribution(statesToProcess)
                        .blockingGet()
                    //Showing initial notification for the number of uploads being processed

                    processingUploads.setContentTitle(appContext.getString(R.string.starting_uploads))
                    processingUploads.setContentText(
                        appContext.resources.getQuantityString(
                            R.plurals.starting_multiple_uploads,
                            queuedContributions.size,
                            queuedContributions.size
                        )
                    )
                    notificationManager?.notify(
                        PROCESSING_UPLOADS_NOTIFICATION_TAG,
                        PROCESSING_UPLOADS_NOTIFICATION_ID,
                        processingUploads.build()
                    )

                    val sortedQueuedContributionsList: List<Contribution> =
                        queuedContributions.sortedBy { it.dateUploadStartedInMillis() }

                    /**
                    * To avoid race condition when multiple of these workers are working, assign this state
                    so that the next one does not process these contribution again
                    Related issues (fixed):
                    https://github.com/commons-app/apps-android-commons/issues/5136
                    https://github.com/commons-app/apps-android-commons/issues/5346
                    */
                    // sortedQueuedContributionsList.forEach {
                    // it.state = Contribution.STATE_IN_PROGRESS
                    // contributionDao.saveSynchronous(it)
                    // }
                    val queuedContributions = contributionDao.getContribution(statesToProcess)
                        .blockingGet()
                    //Showing initial notification for the number of uploads being processed

                    var contribution = sortedQueuedContributionsList.first()
                    processingUploads.setContentTitle(appContext.getString(R.string.starting_uploads))
                    processingUploads.setContentText(
                        appContext.resources.getQuantityString(
                            R.plurals.starting_multiple_uploads,
                            queuedContributions.size,
                            queuedContributions.size
                        )
                    )
                    notificationManager?.notify(
                        PROCESSING_UPLOADS_NOTIFICATION_TAG,
                        PROCESSING_UPLOADS_NOTIFICATION_ID,
                        processingUploads.build()
                    )

                    if (contributionDao.getContribution(contribution.pageId) != null) {
                        contribution.transferred = 0
                        contribution.state = Contribution.STATE_IN_PROGRESS
                        contributionDao.saveSynchronous(contribution)
                        setProgressAsync(Data.Builder().putInt("progress", countUpload).build())
                        countUpload++
                        uploadContribution(contribution = contribution)
                    val sortedQueuedContributionsList: List<Contribution> =
                        queuedContributions.sortedBy { it.dateUploadStartedInMillis() }

                    var contribution = sortedQueuedContributionsList.first()

                    if (contributionDao.getContribution(contribution.pageId) != null) {
                        contribution.transferred = 0
                        contribution.state = Contribution.STATE_IN_PROGRESS
                        contributionDao.saveSynchronous(contribution)
                        setProgressAsync(Data.Builder().putInt("progress", countUpload).build())
                        countUpload++
                        uploadContribution(contribution = contribution)
                    }
                }
                //Dismiss the global notification
                notificationManager?.cancel(
                    PROCESSING_UPLOADS_NOTIFICATION_TAG,
                    PROCESSING_UPLOADS_NOTIFICATION_ID
                )
            }
            // Trigger WorkManager to process any new contributions that may have been added to the queue
            val updatedContributionQueue = withContext(Dispatchers.IO) {
                contributionDao.getContribution(statesToProcess).blockingGet()
            }
            if (updatedContributionQueue.isNotEmpty()) {
                return Result.retry()
            }
            //Dismiss the global notification
            notificationManager?.cancel(
                PROCESSING_UPLOADS_NOTIFICATION_TAG,
                PROCESSING_UPLOADS_NOTIFICATION_ID
            )
        }
        // Trigger WorkManager to process any new contributions that may have been added to the queue
        val updatedContributionQueue = withContext(Dispatchers.IO) {
            contributionDao.getContribution(statesToProcess).blockingGet()
        }
        if (updatedContributionQueue.isNotEmpty()) {
            return Result.retry()
        }

        return Result.success()
            return Result.success()
        } catch (e: Exception) {
            Timber.e(e, "UploadWorker encountered an error.")
            return Result.failure()
        } finally {
            WorkRequestHelper.markUploadWorkerAsStopped()
        }
    }

    /**
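
The large doWork() hunk interleaves the old and new bodies; the structural change is that the whole upload loop now runs inside try/catch/finally so that the helper's running flag is always cleared, even when the worker throws. A heavily abridged skeleton of the apparent new control flow (notification handling and most details are elided, and the selection of the next contribution is simplified):

// Skeleton inferred from the added lines above; not the worker's exact code.
override suspend fun doWork(): Result {
    try {
        setForeground(createForegroundInfo())   // keep the worker in the foreground
        withContext(Dispatchers.IO) {
            // Re-query on every pass: a one-shot query would miss rows added meanwhile
            // (issues #5136 and #5346 referenced in the hunk above).
            while (contributionDao.getContribution(statesToProcess).blockingGet().isNotEmpty()) {
                val next = contributionDao.getContribution(statesToProcess).blockingGet()
                    .minByOrNull { it.dateUploadStartedInMillis() } ?: break
                next.state = Contribution.STATE_IN_PROGRESS
                contributionDao.saveSynchronous(next)
                uploadContribution(contribution = next)
            }
        }
        // If more work arrived while uploading, ask WorkManager to run the worker again.
        val remaining = withContext(Dispatchers.IO) {
            contributionDao.getContribution(statesToProcess).blockingGet()
        }
        return if (remaining.isNotEmpty()) Result.retry() else Result.success()
    } catch (e: Exception) {
        Timber.e(e, "UploadWorker encountered an error.")
        return Result.failure()
    } finally {
        // Clear the flag so WorkRequestHelper can enqueue the next worker.
        WorkRequestHelper.markUploadWorkerAsStopped()
    }
}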

@@ -3,6 +3,7 @@ package fr.free.nrw.commons.upload.worker
import android.content.Context
import androidx.work.*
import androidx.work.WorkRequest.Companion.MIN_BACKOFF_MILLIS
import timber.log.Timber
import java.util.concurrent.TimeUnit

/**

@@ -11,7 +12,16 @@ import java.util.concurrent.TimeUnit
class WorkRequestHelper {

    companion object {
        @Volatile
        private var isUploadWorkerRunning = false

        fun makeOneTimeWorkRequest(context: Context, existingWorkPolicy: ExistingWorkPolicy) {

            if (isUploadWorkerRunning) {
                Timber.e("UploadWorker is already running. Cannot start another instance.")
                return
            }

            /* Set backoff criteria for the work request
               The default backoff policy is EXPONENTIAL, but while testing we found that it took
               too long for the uploads to finish. So, set the backoff policy as LINEAR with the

@@ -35,7 +45,13 @@ class WorkRequestHelper {
            WorkManager.getInstance(context).enqueueUniqueWork(
                UploadWorker::class.java.simpleName, existingWorkPolicy, uploadRequest
            )

            isUploadWorkerRunning = true
        }

        fun markUploadWorkerAsStopped() {
            isUploadWorkerRunning = false
        }
    }
}

}
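
This is the flag the commit title refers to: a @Volatile boolean in WorkRequestHelper that makes enqueueing a no-op while an UploadWorker instance is still marked as running, and that the worker clears in its finally block. A condensed Kotlin sketch of the mechanism (the backoff and constraint setup from the real helper is elided; the request built here is only a placeholder):

import android.content.Context
import androidx.work.ExistingWorkPolicy
import androidx.work.OneTimeWorkRequest
import androidx.work.WorkManager
import timber.log.Timber

// Condensed sketch of the guard added in this commit; request configuration elided.
class WorkRequestHelperSketch {
    companion object {
        @Volatile
        private var isUploadWorkerRunning = false

        fun makeOneTimeWorkRequest(context: Context, existingWorkPolicy: ExistingWorkPolicy) {
            // The flag: refuse to enqueue a second worker while one is still marked running.
            if (isUploadWorkerRunning) {
                Timber.e("UploadWorker is already running. Cannot start another instance.")
                return
            }
            val uploadRequest = OneTimeWorkRequest.Builder(UploadWorker::class.java).build()
            WorkManager.getInstance(context).enqueueUniqueWork(
                UploadWorker::class.java.simpleName, existingWorkPolicy, uploadRequest
            )
            isUploadWorkerRunning = true
        }

        // Called from UploadWorker's finally block so the next request can go through.
        fun markUploadWorkerAsStopped() {
            isUploadWorkerRunning = false
        }
    }
}

Note that the flag lives in process memory, so it resets if the app process is killed; the unique work name passed to enqueueUniqueWork, together with the ExistingWorkPolicy the callers supply, should still guard against duplicate enqueues in that case.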

@@ -171,17 +171,6 @@ class ContributionViewHolderUnitTests {
        contributionViewHolder.init(0, contribution)
    }

    @Test
    @Throws(Exception::class)
    fun testInitCaseNonNull_STATE_QUEUED_LIMITED_CONNECTION_MODE() {
        Shadows.shadowOf(Looper.getMainLooper()).idle()
        `when`(contribution.state).thenReturn(Contribution.STATE_QUEUED_LIMITED_CONNECTION_MODE)
        `when`(contribution.media).thenReturn(media)
        `when`(media.mostRelevantCaption).thenReturn("")
        `when`(media.author).thenReturn("")
        contributionViewHolder.init(0, contribution)
    }

    @Test
    @Throws(Exception::class)
    fun testInitCaseNonNull_STATE_IN_PROGRESS() {