forked from Github/Axter-Stash
Added logic to stop FileMonitor from running multiple scan jobs at once.
Hundreds of simultaneous file changes caused FileMonitor to launch dozens of scan jobs. Added logic so FileMonitor delays new scan jobs while the previous scan job is still running.
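In outline, the new throttling works like this (a minimal sketch of the pattern, not the plugin code itself; `stash`, `find_job`, and `metadata_scan` mirror the StashPluginHelper calls used in the diff below, and the helper function name is illustrative):

```python
import time

# Assumed limit, mirroring the new "maxWaitTimeJobFinish" config value below.
MAX_SECONDS_WAIT_SCANJOB_COMPLETE = 30 * 60

# State for the single scan job allowed to be in flight at a time.
lastScanJob = {"id": -1, "TargetPaths": [], "DelayedProcessTargetPaths": [],
               "timeAddedToTaskQueue": None}

def request_scan(stash, paths):
    """Start a scan now, or defer the paths if the previous scan job is still running."""
    if lastScanJob["id"] > -1:
        status = stash.find_job(lastScanJob["id"])
        elapsed = time.time() - lastScanJob["timeAddedToTaskQueue"]
        if (status and status.get("status") == "FINISHED") or elapsed > MAX_SECONDS_WAIT_SCANJOB_COMPLETE:
            # Previous job finished (or we gave up waiting): fold any deferred
            # paths into this scan and allow a new job to start.
            lastScanJob["id"] = -1
            paths = list(set(paths) | set(lastScanJob["DelayedProcessTargetPaths"]))
            lastScanJob["DelayedProcessTargetPaths"] = []
        else:
            # A scan job is still running: queue the paths instead of piling up jobs.
            for p in paths:
                if p not in lastScanJob["DelayedProcessTargetPaths"]:
                    lastScanJob["DelayedProcessTargetPaths"].append(p)
            return
    lastScanJob["id"] = int(stash.metadata_scan(paths=paths))
    lastScanJob["TargetPaths"] = paths
    lastScanJob["timeAddedToTaskQueue"] = time.time()
```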
@@ -21,31 +21,44 @@ parser.add_argument('--add_dup_tag', '-a', dest='dup_tag', action='store_true', 
 parse_args = parser.parse_args()
 
 settings = {
-    "mergeDupFilename": True,
-    "moveToTrashCan": False,
-    "dupFileTag": "DuplicateMarkForDeletion",
     "dupWhiteListTag": "",
-    "zxgraylist": "",
+    "dupFileTag": "DuplicateMarkForDeletion",
+    "mergeDupFilename": False,
+    "permanentlyDelete": False,
+    "whitelistDelDupInSameFolder": False,
     "zwhitelist": "",
-    "zzblacklist": "",
+    "zxgraylist": "",
+    "zyblacklist": "",
     "zymaxDupToProcess": 0,
     "zzdebugTracing": False,
 }
 stash = StashPluginHelper(
     stash_url=parse_args.stash_url,
     debugTracing=parse_args.trace,
     settings=settings,
-    config=config
+    config=config,
+    maxbytes=10*1024*1024,
 )
 stash.Status()
 stash.Log(f"\nStarting (__file__={__file__}) (stash.CALLED_AS_STASH_PLUGIN={stash.CALLED_AS_STASH_PLUGIN}) (stash.DEBUG_TRACING={stash.DEBUG_TRACING}) (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})************************************************")
 
 stash.Trace(f"(stashPaths={stash.STASH_PATHS})")
 
-listSeparator = stash.pluginConfig['listSeparator'] if stash.pluginConfig['listSeparator'] != "" else ','
-addPrimaryDupPathToDetails = stash.pluginConfig['addPrimaryDupPathToDetails']
-mergeDupFilename = stash.pluginSettings['mergeDupFilename']
-moveToTrashCan = stash.pluginSettings['moveToTrashCan']
-alternateTrashCanPath = stash.pluginConfig['dup_path']
+listSeparator = stash.pluginConfig['listSeparator'] if stash.pluginConfig['listSeparator'] != "" else ','
+addPrimaryDupPathToDetails = stash.pluginConfig['addPrimaryDupPathToDetails']
+mergeDupFilename = stash.pluginSettings['mergeDupFilename']
+moveToTrashCan = False if stash.pluginSettings['permanentlyDelete'] else True
+alternateTrashCanPath = stash.pluginConfig['dup_path']
+whitelistDelDupInSameFolder = stash.pluginSettings['whitelistDelDupInSameFolder']
 maxDupToProcess = stash.pluginSettings['zymaxDupToProcess']
 duplicateMarkForDeletion = stash.pluginSettings['dupFileTag']
 if duplicateMarkForDeletion == "":
     duplicateMarkForDeletion = 'DuplicateMarkForDeletion'
 duplicateWhitelistTag = stash.pluginSettings['dupWhiteListTag']
 
+excludeMergeTags = [duplicateMarkForDeletion]
+if duplicateWhitelistTag != "":
+    excludeMergeTags = excludeMergeTags + [duplicateWhitelistTag]
 
 def realpath(path):
     """
@@ -134,12 +147,13 @@ def createTagId(tagName, tagName_descp, deleteIfExist = False):
 
 def setTagId(tagId, tagName, sceneDetails, PrimeDuplicateScene = ""):
     if PrimeDuplicateScene != "" and addPrimaryDupPathToDetails:
-        if sceneDetails['details'].startswith(f"Primary Duplicate = {PrimeDuplicateScene}"):
+        BaseDupStr = f"BaseDup={PrimeDuplicateScene}"
+        if sceneDetails['details'].startswith(BaseDupStr) or sceneDetails['details'].startswith(f"Primary Duplicate = {PrimeDuplicateScene}"):
             PrimeDuplicateScene = ""
         elif sceneDetails['details'] == "":
-            PrimeDuplicateScene = f"Primary Duplicate = {PrimeDuplicateScene}"
+            PrimeDuplicateScene = BaseDupStr
         else:
-            PrimeDuplicateScene = f"Primary Duplicate = {PrimeDuplicateScene};\n{sceneDetails['details']}"
+            PrimeDuplicateScene = f"{BaseDupStr};\n{sceneDetails['details']}"
     for tag in sceneDetails['tags']:
         if tag['name'] == tagName:
             if PrimeDuplicateScene != "" and addPrimaryDupPathToDetails:
@@ -158,17 +172,98 @@ def isInList(listToCk, pathToCk):
             return True
     return False
 
+def hasSameDir(path1, path2):
+    if pathlib.Path(path1).resolve().parent == pathlib.Path(path2).resolve().parent:
+        return True
+    return False
+
+def prnt(data):
+    return ascii(data) # return data.encode('ascii','ignore')
+
+def mergeData(SrcData, DestData):
+    # Merge tags
+    dataAdded = ""
+    for tag in SrcData['tags']:
+        if tag not in DestData['tags'] and tag['name'] not in excludeMergeTags:
+            stash.update_scene({'id' : DestData['id'], 'tag_ids' : tag['id']})
+            dataAdded += f"{tag['name']} "
+    if dataAdded != "":
+        stash.Trace(f"Added tags ({dataAdded}) to file {prnt(DestData['files'][0]['path'])}")
+    # Merge URLs
+    dataAdded = ""
+    listToAdd = DestData['urls']
+    for url in SrcData['urls']:
+        if url not in DestData['urls'] and not url.startswith(stash.STASH_URL):
+            listToAdd += [url]
+            dataAdded += f"{url} "
+    if dataAdded != "":
+        stash.update_scene({'id' : DestData['id'], 'urls' : listToAdd})
+        stash.Trace(f"Added urls ({dataAdded}) to file {prnt(DestData['files'][0]['path'])}")
+    # Merge performers
+    dataAdded = ""
+    listToAdd = []
+    for performer in SrcData['performers']:
+        if performer not in DestData['performers']:
+            listToAdd += [performer['id']]
+            dataAdded += f"{performer['id']} "
+    if dataAdded != "":
+        for performer in DestData['performers']:
+            listToAdd += [performer['id']]
+        stash.update_scene({'id' : DestData['id'], 'performer_ids' : listToAdd})
+        stash.Trace(f"Added performers ({dataAdded}) to file {prnt(DestData['files'][0]['path'])}")
+    # Merge studio
+    if DestData['studio'] == None and SrcData['studio'] != None:
+        stash.update_scene({'id' : DestData['id'], 'studio_id' : SrcData['studio']['id']})
+    # Merge galleries
+    dataAdded = ""
+    listToAdd = []
+    for gallery in SrcData['galleries']:
+        if gallery not in DestData['galleries']:
+            listToAdd += [gallery['id']]
+            dataAdded += f"{gallery['id']} "
+    if dataAdded != "":
+        for gallery in DestData['galleries']:
+            listToAdd += [gallery['id']]
+        stash.update_scene({'id' : DestData['id'], 'gallery_ids' : listToAdd})
+        stash.Trace(f"Added galleries ({dataAdded}) to file {prnt(DestData['files'][0]['path'])}")
+    # Merge title
+    if DestData['title'] == "" and SrcData['title'] != "":
+        stash.update_scene({'id' : DestData['id'], 'title' : SrcData['title']})
+    # Merge director
+    if DestData['director'] == "" and SrcData['director'] != "":
+        stash.update_scene({'id' : DestData['id'], 'director' : SrcData['director']})
+    # Merge date
+    if DestData['date'] == None and SrcData['date'] != None:
+        stash.update_scene({'id' : DestData['id'], 'date' : SrcData['date']})
+    # Merge details
+    if DestData['details'] == "" and SrcData['details'] != "":
+        stash.update_scene({'id' : DestData['id'], 'details' : SrcData['details']})
+    # Merge movies
+    dataAdded = ""
+    listToAdd = []
+    for movie in SrcData['movies']:
+        if movie not in DestData['movies']:
+            listToAdd += [{"movie_id" : movie['movie']['id'], "scene_index" : movie['scene_index']}]
+            dataAdded += f"{movie['movie']['id']} "
+    if dataAdded != "":
+        for movie in DestData['movies']:
+            listToAdd += [{"movie_id" : movie['movie']['id'], "scene_index" : movie['scene_index']}]
+        stash.update_scene({'id' : DestData['id'], 'movies' : listToAdd})
+        stash.Trace(f"Added movies ({dataAdded}) to file {prnt(DestData['files'][0]['path'])}")
+    # Merge rating100
+    if DestData['rating100'] == None and SrcData['rating100'] != None:
+        stash.update_scene({'id' : DestData['id'], 'rating100' : SrcData['rating100']})
+    # Merge code (Studio Code)
+    if DestData['code'] == "" and SrcData['code'] != "":
+        stash.update_scene({'id' : DestData['id'], 'code' : SrcData['code']})
+
 def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False):
     duration_diff = 10.00
     duplicateMarkForDeletion = stash.pluginSettings['dupFileTag']
     duplicateMarkForDeletion_descp = 'Tag added to duplicate scenes so-as to tag them for deletion.'
     if duplicateMarkForDeletion == "":
         duplicateMarkForDeletion = 'DuplicateMarkForDeletion'
     stash.Log(f"duplicateMarkForDeletion = {duplicateMarkForDeletion}")
     dupTagId = createTagId(duplicateMarkForDeletion, duplicateMarkForDeletion_descp)
     stash.Trace(f"dupTagId={dupTagId} name={duplicateMarkForDeletion}")
 
     duplicateWhitelistTag = stash.pluginSettings['dupWhiteListTag']
     dupWhitelistTagId = None
     if duplicateWhitelistTag != "":
         stash.Log(f"duplicateWhitelistTag = {duplicateWhitelistTag}")
@@ -185,7 +280,7 @@ def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False):
     whitelist = [item.lower() for item in whitelist]
     if whitelist == [""] : whitelist = []
     stash.Log(f"whitelist = {whitelist}")
-    blacklist = stash.pluginSettings['zzblacklist'].split(listSeparator)
+    blacklist = stash.pluginSettings['zyblacklist'].split(listSeparator)
     blacklist = [item.lower() for item in blacklist]
     if blacklist == [""] : blacklist = []
     stash.Log(f"blacklist = {blacklist}")
@@ -212,7 +307,7 @@ def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False):
         QtyDup+=1
         Scene = stash.find_scene(DupFile['id'])
         sceneData = f"Scene = {Scene}"
-        stash.Trace(sceneData.encode('ascii','ignore'))
+        stash.Trace(prnt(sceneData))
         DupFileDetailList = DupFileDetailList + [Scene]
         if DupFileToKeep != "":
             if DupFileToKeep['files'][0]['duration'] == Scene['files'][0]['duration']:
@@ -236,22 +331,23 @@ def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False):
                 DupFileToKeep = Scene
         else:
             DupFileToKeep = Scene
-        # stash.Log(f"DupFileToKeep = {DupFileToKeep}")
-        stash.Trace(f"KeepID={DupFileToKeep['id']}, ID={DupFile['id']} duration=({Scene['files'][0]['duration']}), Size=({Scene['files'][0]['size']}), Res=({Scene['files'][0]['width']} x {Scene['files'][0]['height']}) Name={Scene['files'][0]['path'].encode('ascii','ignore')}")
+        # stash.Trace(f"DupFileToKeep = {DupFileToKeep}")
+        stash.Trace(f"KeepID={DupFileToKeep['id']}, ID={DupFile['id']} duration=({Scene['files'][0]['duration']}), Size=({Scene['files'][0]['size']}), Res=({Scene['files'][0]['width']} x {Scene['files'][0]['height']}) Name={prnt(Scene['files'][0]['path'])}, KeepPath={prnt(DupFileToKeep['files'][0]['path'])}")
         for DupFile in DupFileDetailList:
             if DupFile['id'] != DupFileToKeep['id']:
-                if isInList(whitelist, DupFile['files'][0]['path']):
-                    stash.Log(f"NOT tagging duplicate, because it's in whitelist. '{DupFile['files'][0]['path'].encode('ascii','ignore')}'")
+                if isInList(whitelist, DupFile['files'][0]['path']) and (not whitelistDelDupInSameFolder or not hasSameDir(DupFile['files'][0]['path'], DupFileToKeep['files'][0]['path'])):
+                    stash.Log(f"NOT tagging duplicate, because it's in whitelist. '{prnt(DupFile['files'][0]['path'])}'")
                     if dupWhitelistTagId and tagDuplicates:
                         setTagId(dupWhitelistTagId, duplicateWhitelistTag, DupFile, DupFileToKeep['files'][0]['path'])
                     QtySkipForDel+=1
                 else:
                     if merge:
                         mergeData(DupFile, DupFileToKeep)
                     if deleteDup:
                         DupFileName = DupFile['files'][0]['path']
                         DupFileNameOnly = pathlib.Path(DupFileName).stem
-                        stash.Log(f"Deleting duplicate '{DupFileName.encode('ascii','ignore')}'")
+                        # ToDo: Add logic to check if tag merging is needed before performing deletion.
+                        stash.Log(f"Deleting duplicate '{prnt(DupFileName)}'")
                         if alternateTrashCanPath != "":
                             shutil.move(DupFileName, f"{alternateTrashCanPath }{os.sep}{DupFileNameOnly}")
                         elif moveToTrashCan:
@@ -261,13 +357,13 @@ def mangeDupFiles(merge=False, deleteDup=False, tagDuplicates=False):
                             os.remove(DupFileName)
                     elif tagDuplicates:
                         if QtyTagForDel == 0:
-                            stash.Log(f"Tagging duplicate {DupFile['files'][0]['path'].encode('ascii','ignore')} for deletion with tag {duplicateMarkForDeletion}.")
+                            stash.Log(f"Tagging duplicate {prnt(DupFile['files'][0]['path'])} for deletion with tag {duplicateMarkForDeletion}.")
                         else:
-                            stash.Log(f"Tagging duplicate {DupFile['files'][0]['path'].encode('ascii','ignore')} for deletion.")
+                            stash.Log(f"Tagging duplicate {prnt(DupFile['files'][0]['path'])} for deletion.")
                         setTagId(dupTagId, duplicateMarkForDeletion, DupFile, DupFileToKeep['files'][0]['path'])
                         QtyTagForDel+=1
         stash.Log(SepLine)
-        if QtyDup > 20:
+        if maxDupToProcess > 0 and QtyDup > maxDupToProcess:
             break
 
     stash.Log(f"QtyDupSet={QtyDupSet}, QtyDup={QtyDup}, QtyTagForDel={QtyTagForDel}, QtySkipForDel={QtySkipForDel}, QtyExactDup={QtyExactDup}, QtyAlmostDup={QtyAlmostDup}")
@@ -289,10 +385,10 @@ elif stash.PLUGIN_TASK_NAME == "delete_duplicates":
     mangeDupFiles(deleteDup=True)
     stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT")
 elif parse_args.dup_tag:
-    mangeDupFiles(tagDuplicates=True)
+    mangeDupFiles(tagDuplicates=True, merge=mergeDupFilename)
     stash.Trace(f"Tag duplicate EXIT")
 elif parse_args.remove:
-    mangeDupFiles(deleteDup=True)
+    mangeDupFiles(deleteDup=True, merge=mergeDupFilename)
     stash.Trace(f"Delete duplicate EXIT")
 
 else:
@@ -3,14 +3,6 @@ description: Manages duplicate files.
 version: 0.1.0
 url: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager
 settings:
-  mergeDupFilename:
-    displayName: Merge Duplicate Tags
-    description: Before deletion, merge potential source in the duplicate file names for tag names, performers, and studios.
-    type: BOOLEAN
-  moveToTrashCan:
-    displayName: Trash Can
-    description: Enable to move files to trash can instead of permanently delete file.
-    type: BOOLEAN
   dupFileTag:
     displayName: Duplicate File Tag Name
     description: (Default = DuplicateMarkForDeletion) Tag used to tag duplicates with lower resolution, duration, and file name length.
@@ -19,6 +11,18 @@ settings:
     displayName: Duplicate Whitelist Tag Name
     description: If populated, a tag name used to tag duplicates in the whitelist. E.g. DuplicateWhitelistFile
     type: STRING
+  mergeDupFilename:
+    displayName: Merge Duplicate Tags
+    description: Before deletion, merge metadata from duplicate. E.g. Tag names, performers, studios, title, galleries, rating, details, etc...
+    type: BOOLEAN
+  permanentlyDelete:
+    displayName: Permanent Delete
+    description: (Default=false) Enable to permanently delete files, instead of moving files to trash can.
+    type: BOOLEAN
+  whitelistDelDupInSameFolder:
+    displayName: Whitelist Delete In Same Folder
+    description: (Default=false) Allow whitelist deletion of duplicates within the same whitelist folder.
+    type: BOOLEAN
   zwhitelist:
     displayName: White List
    description: A comma seperated list of paths NOT to be deleted. E.g. C:\Favorite\,E:\MustKeep\
@@ -27,10 +31,14 @@ settings:
     displayName: Gray List
     description: List of preferential paths to determine which duplicate should be the primary. E.g. C:\2nd_Favorite\,H:\ShouldKeep\
     type: STRING
-  zzblacklist:
+  zyblacklist:
     displayName: Black List
     description: List of LEAST preferential paths to determine primary candidates for deletion. E.g. C:\Downloads\,F:\DeleteMeFirst\
     type: STRING
+  zymaxDupToProcess:
+    displayName: Max Dup Process
+    description: Maximum number of duplicates to process. If 0, infinity
+    type: NUMBER
   zzdebugTracing:
     displayName: Debug Tracing
     description: (Default=false) [***For Advanced Users***] Enable debug tracing. When enabled, additional tracing logging is added to Stash\plugins\DupFileManager\DupFileManager.log
@@ -65,6 +65,8 @@ RUN_CLEAN_AFTER_DELETE = stash.pluginConfig["runCleanAfterDelete"]
 RUN_GENERATE_CONTENT = stash.pluginConfig['runGenerateContent']
 SCAN_ON_ANY_EVENT = stash.pluginConfig['onAnyEvent']
 SIGNAL_TIMEOUT = stash.pluginConfig['timeOut'] if stash.pluginConfig['timeOut'] > 0 else 1
+MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS = stash.pluginConfig['timeOutDelayProcess']
+MAX_SECONDS_WAIT_SCANJOB_COMPLETE = stash.pluginConfig['maxWaitTimeJobFinish']
 
 CREATE_SPECIAL_FILE_TO_EXIT = stash.pluginConfig['createSpecFileToExit']
 DELETE_SPECIAL_FILE_ON_STOP = stash.pluginConfig['deleteSpecFileInStop']
@@ -399,10 +401,19 @@ class StashScheduler: # Stash Scheduler
             schedule.run_pending()
             stash.TraceOnce("Pending check complete.")
 
-TargetPaths = []
+TargetPaths = []
+lastScanJob = {
+    "id": -1,
+    "TargetPaths": [],
+    "DelayedProcessTargetPaths": [],
+    "timeAddedToTaskQueue": None,
+    "lastStatus" : ""
+}
 
 def start_library_monitor():
     global shouldUpdate
     global TargetPaths
+    global lastScanJob
     try:
         # Create shared memory buffer which can be used as singleton logic or to get a signal to quit task from external script
         shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=True, size=4)
@@ -529,9 +540,17 @@ def start_library_monitor():
                     break
             if stash.pluginSettings['turnOnScheduler']:
                 stashScheduler.checkSchedulePending()
-            stash.LogOnce("Waiting for a file change-trigger.")
-            signal.wait(timeout=SIGNAL_TIMEOUT)
-            if stash.pluginSettings['turnOnScheduler'] and not shouldUpdate:
+            timeOutInSeconds = SIGNAL_TIMEOUT
+            if lastScanJob['DelayedProcessTargetPaths'] != [] and timeOutInSeconds > MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS:
+                timeOutInSeconds = MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS
+                stash.LogOnce(f"Awaiting file change-trigger, with a short timeout ({timeOutInSeconds} seconds), because of active delay path processing.")
+            else:
+                stash.LogOnce(f"Waiting for a file change-trigger. Timeout = {timeOutInSeconds} seconds.")
+            signal.wait(timeout=timeOutInSeconds)
+            if lastScanJob['DelayedProcessTargetPaths'] != []:
+                stash.TraceOnce(f"Processing delay scan for path(s) {lastScanJob['DelayedProcessTargetPaths']}")
+                break
+            elif stash.pluginSettings['turnOnScheduler'] and not shouldUpdate:
                 stash.TraceOnce("Checking the scheduler.")
             elif shouldUpdate:
                 stash.LogOnce("File change trigger occurred.")
@@ -555,12 +574,39 @@ def start_library_monitor():
                 stash.Log(f"[SpFl]Detected trigger file to kill FileMonitor. {SPECIAL_FILE_NAME}", printTo = stash.LOG_TO_FILE + stash.LOG_TO_CONSOLE + stash.LOG_TO_STASH)
         TargetPaths = []
         TmpTargetPaths = list(set(TmpTargetPaths))
-        if TmpTargetPaths != []:
+        if TmpTargetPaths != [] or lastScanJob['DelayedProcessTargetPaths'] != []:
             stash.Log(f"Triggering Stash scan for path(s) {TmpTargetPaths}")
-            if len(TmpTargetPaths) > 1 or TmpTargetPaths[0] != SPECIAL_FILE_DIR:
+            if lastScanJob['DelayedProcessTargetPaths'] != [] or len(TmpTargetPaths) > 1 or TmpTargetPaths[0] != SPECIAL_FILE_DIR:
                 if not stash.DRY_RUN:
-                    # ToDo: Consider using create_scene, update_scene, and destroy_scene over general method metadata_scan
-                    stash.metadata_scan(paths=TmpTargetPaths)
+                    if lastScanJob['id'] > -1:
+                        lastScanJob['lastStatus'] = stash.find_job(lastScanJob['id'])
+                        stash.Trace(f"Last Scan Job ({lastScanJob['id']}); result = {lastScanJob['lastStatus']}")
+                        elapsedTime = time.time() - lastScanJob['timeAddedToTaskQueue']
+                        if ('status' in lastScanJob['lastStatus'] and lastScanJob['lastStatus']['status'] == "FINISHED") or elapsedTime > MAX_SECONDS_WAIT_SCANJOB_COMPLETE:
+                            if elapsedTime > MAX_SECONDS_WAIT_SCANJOB_COMPLETE:
+                                stash.Warn(f"Timeout occurred waiting for scan job {lastScanJob['id']} to complete. Elapse-Time = {elapsedTime}; Max-Time={MAX_SECONDS_WAIT_SCANJOB_COMPLETE}; Scan-Path(s) = {lastScanJob['TargetPaths']}")
+                            lastScanJob['id'] = -1
+                            if len(lastScanJob['DelayedProcessTargetPaths']) > 0:
+                                stash.Trace(f"Adding {lastScanJob['DelayedProcessTargetPaths']} to {TmpTargetPaths}")
+                                for path in lastScanJob['DelayedProcessTargetPaths']:
+                                    if path not in TmpTargetPaths:
+                                        TmpTargetPaths.append(path)
+                                # TmpTargetPaths += [lastScanJob['DelayedProcessTargetPaths']]
+                                stash.Trace(f"TmpTargetPaths = {TmpTargetPaths}")
+                                lastScanJob['DelayedProcessTargetPaths'] = []
+                        else:
+                            if TmpTargetPaths != []:
+                                stash.Trace(f"Adding {TmpTargetPaths} to {lastScanJob['DelayedProcessTargetPaths']}")
+                                for path in TmpTargetPaths:
+                                    if path not in lastScanJob['DelayedProcessTargetPaths']:
+                                        lastScanJob['DelayedProcessTargetPaths'].append(path)
+                                stash.Trace(f"lastScanJob['DelayedProcessTargetPaths'] = {lastScanJob['DelayedProcessTargetPaths']}")
+                    if lastScanJob['id'] == -1:
+                        stash.Trace(f"Calling metadata_scan for paths '{TmpTargetPaths}'")
+                        lastScanJob['id'] = int(stash.metadata_scan(paths=TmpTargetPaths))
+                        lastScanJob['TargetPaths'] = TmpTargetPaths
+                        lastScanJob['timeAddedToTaskQueue'] = time.time()
+                        stash.Trace(f"metadata_scan JobId = {lastScanJob['id']}, Start-Time = {lastScanJob['timeAddedToTaskQueue']}, paths = {lastScanJob['TargetPaths']}")
                 if RUN_CLEAN_AFTER_DELETE and RunCleanMetadata:
                     stash.metadata_clean(paths=TmpTargetPaths, dry_run=stash.DRY_RUN)
                 if RUN_GENERATE_CONTENT:
@@ -46,6 +46,11 @@ config = {
 
     # Timeout in seconds. This is how often FileMonitor will check the scheduler and (in-plugin mode) check if another job (Task) is in the queue.
    "timeOut": 60,
+    # Timeout in seconds for delay processing of path scan jobs. This value should always be smaller than timeOut
+    "timeOutDelayProcess": 3,
+    # Maximum time to wait for a scan job to complete. Need this incase Stash gets restarted in the middle of a scan job.
+    "maxWaitTimeJobFinish": 30 * 60, # Wait 30 minutes max
 
     # ApiKey only needed when Stash credentials are set and while calling FileMonitor via command line.
     "apiKey" : "", # Example: "eyJabccideJIUfg1NigRInD345I6dfpXVCfd.eyJ1abcDEfGheHRlHJiJklMonPQ32FsVewtsfSIsImlhdCI6MTcyMzg2NzkwOH0.5bkHU6sfs3532dsryu1ki3iFBwnd_4AHs325yHljsPw"
     # Enable to run metadata clean task after file deletion.
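The two new timeouts interact in the wait loop roughly as follows (an illustrative sketch, not part of the commit; the constant names mirror the config keys above):

```python
SIGNAL_TIMEOUT = 60                     # "timeOut": normal wait between trigger checks
MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS = 3  # "timeOutDelayProcess": short wait while paths are deferred

def wait_seconds(delayed_paths_pending: bool) -> int:
    # While deferred paths are queued behind a running scan job, wake up sooner
    # so the queued scan is retried promptly once that job finishes.
    if delayed_paths_pending and SIGNAL_TIMEOUT > MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS:
        return MAX_TIMEOUT_FOR_DELAY_PATH_PROCESS
    return SIGNAL_TIMEOUT
```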