forked from Github/Axter-Stash
Added daily scheduler logic using weekday pattern
@@ -4,8 +4,10 @@
 # Note: To call this script outside of Stash, pass any argument.
 # Example: python DupFileManager.py start

+# Research:
 # Research following links to complete this plugin:
 # https://github.com/WithoutPants/stash-plugin-duplicate-finder
+# Look at stash API find_duplicate_scenes
 import os
 import sys
 import time
@@ -9,7 +9,7 @@ From the GUI, FileMonitor can be started as a service or as a plugin. The recomm
 - 
 - **Important Note**: At first, this will show up as a plugin in the Task Queue momentarily. It will then disappear from the Task Queue and run in the background as a service.
 - To stop FileMonitor click on [Stop Library Monitor] button.
-- The **[Monitor as a Plugin]** option is mainaly available for backwards compatibility and for test purposes.
+- The **[Monitor as a Plugin]** option is mainly available for backwards compatibility and for test purposes.


 ## Using FileMonitor as a script
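
For reference, the script mode referenced by the heading above is launched from the command line. A minimal invocation, assuming the --url argument documented in filemonitor.py and the default Stash endpoint shown elsewhere in this diff:

    python filemonitor.py --url http://localhost:9999
    python filemonitor.py --stop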
@@ -63,7 +63,7 @@ To configure the schedule or to add new task, edit the **task_scheduler** sectio

 # Note:
 # The below example tasks are done using hours and minutes because the task is easily disabled (deactivated) by a zero value entry.
-# Any of these task types can be converted to a weekly/monthly sysntax.
+# Any of these task types can be converted to a weekly/monthly syntax.

 # Example task for calling another Stash plugin, which needs plugin name and plugin ID.
 {"task" : "PluginButtonName_Here", "pluginId" : "PluginId_Here", "hours" : 0}, # The zero frequency value makes this task disabled.
@@ -93,8 +93,8 @@ To configure the schedule or to add new task, edit the **task_scheduler** sectio
 - The frequency field does support **days** and **seconds**.
 - **seconds** is mainly used for test purposes.
 - The use of **days** is discourage, because it only works if FileMonitor is running for X many days non-stop.
-- For example, if days is used with 30 days, FileMonitor would have to be running non-stop for 30 days before the task is activated. If it's restarted at any time durring the 30 days, the count down restarts.
+- For example, if days is used with 30 days, FileMonitor would have to be running non-stop for 30 days before the task is activated. If it's restarted at any time during the 30 days, the count down restarts.
-- It's recommended to use weekday based syntax over using days, because many restarts can occur durring the week or month, and the task will still get started as long as FileMonitor is running durring the scheduled activation time.
+- It's recommended to use weekday based syntax over using days, because many restarts can occur during the week or month, and the task will still get started as long as FileMonitor is running during the scheduled activation time.
 - **weekday Based**
 - Use the weekday based syntax for weekly and monthly schedules.
 - Both weekly and monthly schedules must have a **weekday** field and a **time** field, which specifies the day of the week and the time to start the task.
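
The weekday/time rules above translate directly into task_scheduler entries. A minimal sketch (field names follow the config file further down in this diff; the task names and times are only illustrative):

    {"task" : "Generate", "weekday" : "sunday", "time" : "07:00"},               # weekly: every Sunday at 7AM
    {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2},  # monthly: the 2nd Sunday of each month at 1AM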
@@ -1,7 +1,7 @@
-import stashapi.log as stashLog # stashapi.log by default for error and critical logging
 from stashapi.stashapp import StashInterface
 from logging.handlers import RotatingFileHandler
 import inspect, sys, os, pathlib, logging, json
+from stashapi.stash_types import PhashDistance
 import __main__

 # StashPluginHelper (By David Maisonave aka Axter)
@@ -11,7 +11,6 @@ import __main__
 # Logging includes source code line number
 # Sets a maximum plugin log file size
 # Stash Interface Features:
-# Sets STASH_INTERFACE with StashInterface
 # Gets STASH_URL value from command line argument and/or from STDIN_READ
 # Sets FRAGMENT_SERVER based on command line arguments or STDIN_READ
 # Sets PLUGIN_ID based on the main script file name (in lower case)
@@ -22,14 +21,14 @@ import __main__
 # Gets DEBUG_TRACING value from command line argument and/or from UI and/or from config file
 # Sets RUNNING_IN_COMMAND_LINE_MODE to True if detects multiple arguments
 # Sets CALLED_AS_STASH_PLUGIN to True if it's able to read from STDIN_READ
-class StashPluginHelper:
+class StashPluginHelper(StashInterface):
 # Primary Members for external reference
 PLUGIN_TASK_NAME = None
 PLUGIN_ID = None
 PLUGIN_CONFIGURATION = None
 pluginSettings = None
 pluginConfig = None
-STASH_INTERFACE = None
+STASH_INTERFACE_INIT = False
 STASH_URL = None
 STASH_CONFIGURATION = None
 JSON_INPUT = None
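
Because StashPluginHelper now subclasses StashInterface, the rest of this commit can drop the separate STASH_INTERFACE member and call StashInterface methods directly on the helper instance. A minimal sketch of the resulting call pattern (mirroring the filemonitor.py changes later in this diff; argument values are placeholders):

    stash = StashPluginHelper(settings=settings, stash_url=parse_args.stash_url)
    stash.metadata_scan(paths=stashPaths)   # previously: plugin.STASH_INTERFACE.metadata_scan(paths=stashPaths)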
@@ -37,6 +36,7 @@ class StashPluginHelper:
 DRY_RUN = False
 CALLED_AS_STASH_PLUGIN = False
 RUNNING_IN_COMMAND_LINE_MODE = False
+FRAGMENT_SERVER = None

 # printTo argument
 LOG_TO_FILE = 1
@@ -54,8 +54,7 @@ class StashPluginHelper:
 LOG_FILE_DIR = None
 LOG_FILE_NAME = None
 STDIN_READ = None
-FRAGMENT_SERVER = None
-logger = None
+pluginLog = None
 logLinePreviousHits = []

 # Prefix message value
@@ -91,7 +90,8 @@ class StashPluginHelper:
 fragmentServer = None,
 stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999
 DebugTraceFieldName = "zzdebugTracing",
-DryRunFieldName = "zzdryRun"):
+DryRunFieldName = "zzdryRun",
+setStashLoggerAsPluginLogger = False):
 if logToWrnSet: self.log_to_wrn_set = logToWrnSet
 if logToErrSet: self.log_to_err_set = logToErrSet
 if logToNormSet: self.log_to_norm = logToNormSet
@@ -138,7 +138,8 @@ class StashPluginHelper:
 self.FRAGMENT_SERVER['Scheme'] = endpointUrlArr[0]
 self.FRAGMENT_SERVER['Host'] = endpointUrlArr[1][2:]
 self.FRAGMENT_SERVER['Port'] = endpointUrlArr[2]
-self.STASH_INTERFACE = self.ExtendStashInterface(self.FRAGMENT_SERVER)
+super().__init__(self.FRAGMENT_SERVER)
+self.STASH_INTERFACE_INIT = True
 else:
 try:
 self.STDIN_READ = sys.stdin.read()
@@ -151,11 +152,12 @@ class StashPluginHelper:
 self.PLUGIN_TASK_NAME = self.JSON_INPUT["args"]["mode"]
 self.FRAGMENT_SERVER = self.JSON_INPUT["server_connection"]
 self.STASH_URL = f"{self.FRAGMENT_SERVER['Scheme']}://{self.FRAGMENT_SERVER['Host']}:{self.FRAGMENT_SERVER['Port']}"
-self.STASH_INTERFACE = self.ExtendStashInterface(self.FRAGMENT_SERVER)
+super().__init__(self.FRAGMENT_SERVER)
+self.STASH_INTERFACE_INIT = True

-if self.STASH_INTERFACE:
+if self.STASH_INTERFACE_INIT:
-self.PLUGIN_CONFIGURATION = self.STASH_INTERFACE.get_configuration()["plugins"]
+self.PLUGIN_CONFIGURATION = self.get_configuration()["plugins"]
-self.STASH_CONFIGURATION = self.STASH_INTERFACE.get_configuration()["general"]
+self.STASH_CONFIGURATION = self.get_configuration()["general"]
 if settings:
 self.pluginSettings = settings
 if self.PLUGIN_ID in self.PLUGIN_CONFIGURATION:
@@ -167,7 +169,9 @@ class StashPluginHelper:
 if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG

 logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH])
-self.logger = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem)
+self.pluginLog = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem)
+if setStashLoggerAsPluginLogger:
+self.log = self.pluginLog

 def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False):
 if printTo == 0:
@@ -187,24 +191,24 @@ class StashPluginHelper:
 # print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}")
 if logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG):
 if levelStr == "": levelStr = self.LEV_DBG
-if printTo & self.LOG_TO_FILE: self.logger.debug(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}")
-if printTo & self.LOG_TO_STASH: stashLog.debug(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}")
 elif logLevel == logging.INFO or logLevel == logging.DEBUG:
 if levelStr == "": levelStr = self.LEV_INF if logLevel == logging.INFO else self.LEV_DBG
-if printTo & self.LOG_TO_FILE: self.logger.info(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}")
-if printTo & self.LOG_TO_STASH: stashLog.info(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}")
 elif logLevel == logging.WARN:
 if levelStr == "": levelStr = self.LEV_WRN
-if printTo & self.LOG_TO_FILE: self.logger.warning(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}")
-if printTo & self.LOG_TO_STASH: stashLog.warning(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}")
 elif logLevel == logging.ERROR:
 if levelStr == "": levelStr = self.LEV_ERR
-if printTo & self.LOG_TO_FILE: self.logger.error(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}")
-if printTo & self.LOG_TO_STASH: stashLog.error(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
 elif logLevel == logging.CRITICAL:
 if levelStr == "": levelStr = self.LEV_CRITICAL
-if printTo & self.LOG_TO_FILE: self.logger.critical(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}")
-if printTo & self.LOG_TO_STASH: stashLog.error(f"{LN_Str} {levelStr}{logMsg}")
+if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
 if (printTo & self.LOG_TO_CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
 print(f"{LN_Str} {levelStr}{logMsg}")
 if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
@@ -280,7 +284,6 @@ class StashPluginHelper:
 return self.ExecuteProcess(argsWithPython)

 # Extends class StashInterface with functions which are not yet in the class
-class ExtendStashInterface(StashInterface):
 def metadata_autotag(self, paths:list=[], performers:list=[], studios:list=[], tags:list=[]):
 query = """
 mutation MetadataAutoTag($input:AutoTagMetadataInput!) {
@@ -319,3 +322,26 @@ class StashPluginHelper:
 }
 result = self.call_GQL(query, {"input": clean_metadata_input})
 return result
+# def find_duplicate_scenes(self, distance: PhashDistance=PhashDistance.EXACT, fragment=None, duration_diff=0):
+# query = """
+# query FindDuplicateScenes($distance: Int) {
+# findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {
+# ...SceneSlim
+# }
+# }
+# """
+# if fragment:
+# query = re.sub(r'\.\.\.SceneSlim', fragment, query)
+# else:
+# query = """
+# query FindDuplicateScenes($distance: Int) {
+# findDuplicateScenes(distance: $distance, duration_diff: $duration_diff) {
+# }
+# }
+# """
+# variables = {
+# "distance": distance,
+# "duration_diff": duration_diff
+# }
+# result = self.call_GQL(query, variables)
+# return result['findDuplicateScenes']
@@ -1,4 +1,4 @@
-# Description: This is a Stash plugin which updates Stash if any changes occurs in the Stash library paths.
+# Description: This is a Stash plugin which updates Stash if any changes occurs in the Stash library paths, and runs a scheduler.
 # By David Maisonave (aka Axter) Jul-2024 (https://www.axter.com/)
 # Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/FileMonitor
 # Note: To call this script outside of Stash, pass argument --url and the Stash URL.
@@ -34,7 +34,7 @@ settings = {
 "zmaximumBackups": 0,
 "zzdebugTracing": False
 }
-plugin = StashPluginHelper(
+stash = StashPluginHelper(
 stash_url=parse_args.stash_url,
 debugTracing=parse_args.trace,
 settings=settings,
@@ -42,8 +42,8 @@ plugin = StashPluginHelper(
 logToErrSet=logToErrSet,
 logToNormSet=logToNormSet
 )
-plugin.Status()
+stash.Status()
-plugin.Log(f"\nStarting (__file__={__file__}) (plugin.CALLED_AS_STASH_PLUGIN={plugin.CALLED_AS_STASH_PLUGIN}) (plugin.DEBUG_TRACING={plugin.DEBUG_TRACING}) (plugin.DRY_RUN={plugin.DRY_RUN}) (plugin.PLUGIN_TASK_NAME={plugin.PLUGIN_TASK_NAME})************************************************")
+stash.Log(f"\nStarting (__file__={__file__}) (stash.CALLED_AS_STASH_PLUGIN={stash.CALLED_AS_STASH_PLUGIN}) (stash.DEBUG_TRACING={stash.DEBUG_TRACING}) (stash.DRY_RUN={stash.DRY_RUN}) (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})************************************************")

 exitMsg = "Change success!!"
 mutex = Lock()
@@ -52,16 +52,16 @@ shouldUpdate = False
 TargetPaths = []

 SHAREDMEMORY_NAME = "DavidMaisonaveAxter_FileMonitor" # Unique name for shared memory
-RECURSIVE = plugin.pluginSettings["recursiveDisabled"] == False
+RECURSIVE = stash.pluginSettings["recursiveDisabled"] == False
-SCAN_MODIFIED = plugin.pluginConfig["scanModified"]
+SCAN_MODIFIED = stash.pluginConfig["scanModified"]
-RUN_CLEAN_AFTER_DELETE = plugin.pluginConfig["runCleanAfterDelete"]
+RUN_CLEAN_AFTER_DELETE = stash.pluginConfig["runCleanAfterDelete"]
-RUN_GENERATE_CONTENT = plugin.pluginConfig['runGenerateContent']
+RUN_GENERATE_CONTENT = stash.pluginConfig['runGenerateContent']
-SCAN_ON_ANY_EVENT = plugin.pluginConfig['onAnyEvent']
+SCAN_ON_ANY_EVENT = stash.pluginConfig['onAnyEvent']
-SIGNAL_TIMEOUT = plugin.pluginConfig['timeOut'] if plugin.pluginConfig['timeOut'] > 0 else 1
+SIGNAL_TIMEOUT = stash.pluginConfig['timeOut'] if stash.pluginConfig['timeOut'] > 0 else 1

-CREATE_SPECIAL_FILE_TO_EXIT = plugin.pluginConfig['createSpecFileToExit']
+CREATE_SPECIAL_FILE_TO_EXIT = stash.pluginConfig['createSpecFileToExit']
-DELETE_SPECIAL_FILE_ON_STOP = plugin.pluginConfig['deleteSpecFileInStop']
+DELETE_SPECIAL_FILE_ON_STOP = stash.pluginConfig['deleteSpecFileInStop']
-SPECIAL_FILE_DIR = f"{plugin.LOG_FILE_DIR}{os.sep}working"
+SPECIAL_FILE_DIR = f"{stash.LOG_FILE_DIR}{os.sep}working"
 if CREATE_SPECIAL_FILE_TO_EXIT and not os.path.exists(SPECIAL_FILE_DIR):
 os.makedirs(SPECIAL_FILE_DIR)
 # Unique name to trigger shutting down FileMonitor
@@ -69,19 +69,19 @@ SPECIAL_FILE_NAME = f"{SPECIAL_FILE_DIR}{os.sep}trigger_to_kill_filemonitor_by_d
 if CREATE_SPECIAL_FILE_TO_EXIT and os.path.isfile(SPECIAL_FILE_NAME):
 os.remove(SPECIAL_FILE_NAME)

-STASHPATHSCONFIG = plugin.STASH_CONFIGURATION['stashes']
+STASHPATHSCONFIG = stash.STASH_CONFIGURATION['stashes']
 stashPaths = []
 for item in STASHPATHSCONFIG:
 stashPaths.append(item["path"])
-plugin.Trace(f"(stashPaths={stashPaths})")
+stash.Trace(f"(stashPaths={stashPaths})")

-if plugin.DRY_RUN:
+if stash.DRY_RUN:
-plugin.Log("Dry run mode is enabled.")
+stash.Log("Dry run mode is enabled.")
-plugin.Trace(f"(SCAN_MODIFIED={SCAN_MODIFIED}) (SCAN_ON_ANY_EVENT={SCAN_ON_ANY_EVENT}) (RECURSIVE={RECURSIVE})")
+stash.Trace(f"(SCAN_MODIFIED={SCAN_MODIFIED}) (SCAN_ON_ANY_EVENT={SCAN_ON_ANY_EVENT}) (RECURSIVE={RECURSIVE})")

 StartFileMonitorAsAPluginTaskName = "Monitor as a Plugin"
 StartFileMonitorAsAServiceTaskName = "Start Library Monitor Service"
-FileMonitorPluginIsOnTaskQue = plugin.CALLED_AS_STASH_PLUGIN
+FileMonitorPluginIsOnTaskQue = stash.CALLED_AS_STASH_PLUGIN
 StopLibraryMonitorWaitingInTaskQueue = False
 JobIdInTheQue = 0
 def isJobWaitingToRun():
@@ -90,9 +90,9 @@ def isJobWaitingToRun():
 global FileMonitorPluginIsOnTaskQue
 FileMonitorPluginIsOnTaskQue = False
 jobIsWaiting = False
-taskQue = plugin.STASH_INTERFACE.job_queue()
+taskQue = stash.job_queue()
 for jobDetails in taskQue:
-plugin.Trace(f"(Job ID({jobDetails['id']})={jobDetails})")
+stash.Trace(f"(Job ID({jobDetails['id']})={jobDetails})")
 if jobDetails['status'] == "READY":
 if jobDetails['description'] == "Running plugin task: Stop Library Monitor":
 StopLibraryMonitorWaitingInTaskQueue = True
@@ -103,107 +103,117 @@ def isJobWaitingToRun():
 JobIdInTheQue = 0
 return jobIsWaiting

-if plugin.CALLED_AS_STASH_PLUGIN:
+if stash.CALLED_AS_STASH_PLUGIN:
-plugin.Trace(f"isJobWaitingToRun() = {isJobWaitingToRun()})")
+stash.Trace(f"isJobWaitingToRun() = {isJobWaitingToRun()})")

 class StashScheduler: # Stash Scheduler
 def __init__(self):
 import schedule # pip install schedule # https://github.com/dbader/schedule
-dayOfTheWeek = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
-for task in plugin.pluginConfig['task_scheduler']:
+for task in stash.pluginConfig['task_scheduler']:
 if 'hours' in task and task['hours'] > 0:
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' at {task['hours']} hours interval")
+stash.Log(f"Adding to scheduler task '{task['task']}' at {task['hours']} hours interval")
 schedule.every(task['hours']).hours.do(self.runTask, task)
 elif 'minutes' in task and task['minutes'] > 0:
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' at {task['minutes']} minutes interval")
+stash.Log(f"Adding to scheduler task '{task['task']}' at {task['minutes']} minutes interval")
 schedule.every(task['minutes']).minutes.do(self.runTask, task)
 elif 'days' in task and task['days'] > 0: # Left here for backward compatibility, but should use weekday logic instead.
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' at {task['days']} days interval")
+stash.Log(f"Adding to scheduler task '{task['task']}' at {task['days']} days interval")
 schedule.every(task['days']).days.do(self.runTask, task)
 elif 'seconds' in task and task['seconds'] > 0: # This is mainly here for test purposes only
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' at {task['seconds']} seconds interval")
+if SIGNAL_TIMEOUT > task['seconds']:
+stash.Log(f"Changing SIGNAL_TIMEOUT from value {SIGNAL_TIMEOUT} to {task['seconds']} to allow '{task['task']}' to get triggered timely")
+SIGNAL_TIMEOUT = task['seconds']
+stash.Log(f"Adding to scheduler task '{task['task']}' at {task['seconds']} seconds interval")
 schedule.every(task['seconds']).seconds.do(self.runTask, task)
-elif 'weekday' in task and task['weekday'].lower() in dayOfTheWeek and 'time' in task:
+elif 'weekday' in task and 'time' in task:
+weekDays = task['weekday'].lower()
 if 'monthly' in task:
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' monthly on number {task['monthly']} {task['weekday']} at {task['time']}")
+stash.Log(f"Adding to scheduler task '{task['task']}' monthly on number {task['monthly']} {task['weekday']} at {task['time']}")
 else:
-plugin.Log(f"Adding to reoccurring scheduler task '{task['task']}' (weekly) every {task['weekday']} at {task['time']}")
+stash.Log(f"Adding to scheduler task '{task['task']}' (weekly) every {task['weekday']} at {task['time']}")
-if task['weekday'].lower() == "monday":
+if "monday" in weekDays:
 schedule.every().monday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "tuesday":
+if "tuesday" in weekDays:
 schedule.every().tuesday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "wednesday":
+if "wednesday" in weekDays:
 schedule.every().wednesday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "thursday":
+if "thursday" in weekDays:
 schedule.every().thursday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "friday":
+if "friday" in weekDays:
 schedule.every().friday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "saturday":
+if "saturday" in weekDays:
 schedule.every().saturday.at(task['time']).do(self.runTask, task)
-elif task['weekday'].lower() == "sunday":
+if "sunday" in weekDays:
 schedule.every().sunday.at(task['time']).do(self.runTask, task)
 self.checkSchedulePending()

 def runTask(self, task):
 import datetime
-plugin.Trace(f"Running task {task}")
+stash.Trace(f"Running task {task}")
 if 'monthly' in task:
 dayOfTheMonth = datetime.datetime.today().day
 FirstAllowedDate = ((task['monthly'] - 1) * 7) + 1
 LastAllowedDate = task['monthly'] * 7
 if dayOfTheMonth < FirstAllowedDate or dayOfTheMonth > LastAllowedDate:
-plugin.Log(f"Skipping task {task['task']} because today is not the right {task['weekday']} of the month. Target range is between {FirstAllowedDate} and {LastAllowedDate}.")
+stash.Log(f"Skipping task {task['task']} because today is not the right {task['weekday']} of the month. Target range is between {FirstAllowedDate} and {LastAllowedDate}.")
 return
 if task['task'] == "Clean":
-plugin.STASH_INTERFACE.metadata_clean(paths=stashPaths, dry_run=plugin.DRY_RUN)
+stash.metadata_clean(paths=stashPaths, dry_run=stash.DRY_RUN)
 elif task['task'] == "Clean Generated Files":
-plugin.STASH_INTERFACE.metadata_clean_generated()
+stash.metadata_clean_generated()
 elif task['task'] == "Generate":
-plugin.STASH_INTERFACE.metadata_generate()
+stash.metadata_generate()
 elif task['task'] == "Backup":
-plugin.LogOnce("Note: Backup task does not get listed in the Task Queue, but user can verify that it started by looking in the Stash log file as an INFO level log line.")
+stash.LogOnce("Note: Backup task does not get listed in the Task Queue, but user can verify that it started by looking in the Stash log file as an INFO level log line.")
-plugin.STASH_INTERFACE.backup_database()
+stash.backup_database()
-if plugin.pluginSettings['zmaximumBackups'] > 1 and 'backupDirectoryPath' in plugin.STASH_CONFIGURATION:
+if stash.pluginSettings['zmaximumBackups'] < 2:
-if len(plugin.STASH_CONFIGURATION['backupDirectoryPath']) > 4 and os.path.exists(plugin.STASH_CONFIGURATION['backupDirectoryPath']):
+stash.TraceOnce(f"Skipping DB backup file trim because zmaximumBackups={stash.pluginSettings['zmaximumBackups']}. Value has to be greater than 1.")
-plugin.LogOnce(f"Checking quantity of DB backups if path {plugin.STASH_CONFIGURATION['backupDirectoryPath']} exceeds {plugin.pluginSettings['zmaximumBackups']} backup files.")
+elif 'backupDirectoryPath' in stash.STASH_CONFIGURATION:
-self.trimDbFiles(plugin.STASH_CONFIGURATION['backupDirectoryPath'], plugin.pluginSettings['zmaximumBackups'])
+if len(stash.STASH_CONFIGURATION['backupDirectoryPath']) < 5:
+stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath length is to short. Len={len(stash.STASH_CONFIGURATION['backupDirectoryPath'])}. Only support length greater than 4 characters.")
+elif os.path.exists(stash.STASH_CONFIGURATION['backupDirectoryPath']):
+stash.LogOnce(f"Checking quantity of DB backups if path {stash.STASH_CONFIGURATION['backupDirectoryPath']} exceeds {stash.pluginSettings['zmaximumBackups']} backup files.")
+self.trimDbFiles(stash.STASH_CONFIGURATION['backupDirectoryPath'], stash.pluginSettings['zmaximumBackups'])
+else:
+stash.TraceOnce(f"Skipping DB backup file trim because backupDirectoryPath does NOT exist. backupDirectoryPath={stash.STASH_CONFIGURATION['backupDirectoryPath']}")
 elif task['task'] == "Scan":
-plugin.STASH_INTERFACE.metadata_scan(paths=stashPaths)
+stash.metadata_scan(paths=stashPaths)
 elif task['task'] == "Auto Tag":
-plugin.STASH_INTERFACE.metadata_autotag(paths=stashPaths)
+stash.metadata_autotag(paths=stashPaths)
 elif task['task'] == "Optimise Database":
-plugin.STASH_INTERFACE.optimise_database()
+stash.optimise_database()
 elif task['task'] == "GQL":
-plugin.STASH_INTERFACE.call_GQL(task['input'])
+stash.call_GQL(task['input'])
 elif task['task'] == "python":
 script = task['script'].replace("<plugin_path>", f"{pathlib.Path(__file__).resolve().parent}{os.sep}")
-plugin.Log(f"Executing python script {script}.")
+stash.Log(f"Executing python script {script}.")
 args = [script]
 if len(task['args']) > 0:
 args = args + [task['args']]
-plugin.ExecutePythonScript(args)
+stash.ExecutePythonScript(args)
 elif task['task'] == "execute":
 cmd = task['command'].replace("<plugin_path>", f"{pathlib.Path(__file__).resolve().parent}{os.sep}")
-plugin.Log(f"Executing command {cmd}.")
+stash.Log(f"Executing command {cmd}.")
 args = [cmd]
 if len(task['args']) > 0:
 args = args + [task['args']]
-plugin.ExecuteProcess(args)
+stash.ExecuteProcess(args)
 else:
 # ToDo: Add code to check if plugin is installed.
-plugin.Trace(f"Running plugin task pluginID={task['pluginId']}, task name = {task['task']}")
+stash.Trace(f"Running plugin task pluginID={task['pluginId']}, task name = {task['task']}")
 try:
-plugin.STASH_INTERFACE.run_plugin_task(plugin_id=task['pluginId'], task_name=task['task'])
+stash.run_plugin_task(plugin_id=task['pluginId'], task_name=task['task'])
 except Exception as e:
-plugin.LogOnce(f"Failed to call plugin {task['task']} with plugin-ID {task['pluginId']}. Error: {e}")
+stash.LogOnce(f"Failed to call plugin {task['task']} with plugin-ID {task['pluginId']}. Error: {e}")
 pass

 def trimDbFiles(self, dbPath, maxFiles):
 if not os.path.exists(dbPath):
-plugin.LogOnce(f"Exiting trimDbFiles, because path {dbPath} does not exists.")
+stash.LogOnce(f"Exiting trimDbFiles, because path {dbPath} does not exists.")
 return
 if len(dbPath) < 5: # For safety and security, short path not supported.
-plugin.Warn(f"Exiting trimDbFiles, because path {dbPath} is to short. Len={len(dbPath)}. Path string must be at least 5 characters in length.")
+stash.Warn(f"Exiting trimDbFiles, because path {dbPath} is to short. Len={len(dbPath)}. Path string must be at least 5 characters in length.")
 return
 stashPrefixSqlDbFileName = "stash-go.sqlite."
 dbFiles = sorted(os.listdir(dbPath))
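
One detail worth spelling out from runTask() above: for tasks that carry a monthly field, the value selects which occurrence of the weekday inside the month may fire. A worked example using the same arithmetic shown in the hunk (values are illustrative):

    # monthly = 2, weekday = "friday"  ->  only the second Friday of the month qualifies
    FirstAllowedDate = ((2 - 1) * 7) + 1   # 8
    LastAllowedDate = 2 * 7                # 14
    # runTask() skips the task unless datetime.datetime.today().day falls between 8 and 14.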
@@ -211,10 +221,10 @@ class StashScheduler: # Stash Scheduler
 for i in range(0, n-maxFiles):
 dbFilePath = f"{dbPath}{os.sep}{dbFiles[i]}"
 if dbFiles[i].startswith(stashPrefixSqlDbFileName):
-plugin.Warn(f"Deleting DB file {dbFilePath}")
+stash.Warn(f"Deleting DB file {dbFilePath}")
 os.remove(dbFilePath)
 else:
-plugin.LogOnce(f"Skipping deleting file '{dbFiles[i]}', because the file doesn't start with string '{stashPrefixSqlDbFileName}'.")
+stash.LogOnce(f"Skipping deleting file '{dbFiles[i]}', because the file doesn't start with string '{stashPrefixSqlDbFileName}'.")

 def checkSchedulePending(self):
 import schedule # pip install schedule # https://github.com/dbader/schedule
@@ -227,21 +237,21 @@ def start_library_monitor():
 # Create shared memory buffer which can be used as singleton logic or to get a signal to quit task from external script
 shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=True, size=4)
 except:
-plugin.Error(f"Could not open shared memory map ({SHAREDMEMORY_NAME}). Change File Monitor must be running. Can not run multiple instance of Change File Monitor. Stop FileMonitor before trying to start it again.")
+stash.Error(f"Could not open shared memory map ({SHAREDMEMORY_NAME}). Change File Monitor must be running. Can not run multiple instance of Change File Monitor. Stop FileMonitor before trying to start it again.")
 return
 type(shm_a.buf)
 shm_buffer = shm_a.buf
 len(shm_buffer)
 shm_buffer[0] = CONTINUE_RUNNING_SIG
-plugin.Trace(f"Shared memory map opended, and flag set to {shm_buffer[0]}")
+stash.Trace(f"Shared memory map opended, and flag set to {shm_buffer[0]}")
 RunCleanMetadata = False
-stashScheduler = StashScheduler() if plugin.pluginSettings['turnOnScheduler'] else None
+stashScheduler = StashScheduler() if stash.pluginSettings['turnOnScheduler'] else None
 event_handler = watchdog.events.FileSystemEventHandler()
 def on_created(event):
 global shouldUpdate
 global TargetPaths
 TargetPaths.append(event.src_path)
-plugin.Log(f"CREATE *** '{event.src_path}'")
+stash.Log(f"CREATE *** '{event.src_path}'")
 with mutex:
 shouldUpdate = True
 signal.notify()
@@ -251,7 +261,7 @@ def start_library_monitor():
 global TargetPaths
 nonlocal RunCleanMetadata
 TargetPaths.append(event.src_path)
-plugin.Log(f"DELETE *** '{event.src_path}'")
+stash.Log(f"DELETE *** '{event.src_path}'")
 with mutex:
 shouldUpdate = True
 RunCleanMetadata = True
@@ -262,19 +272,19 @@ def start_library_monitor():
 global TargetPaths
 if SCAN_MODIFIED:
 TargetPaths.append(event.src_path)
-plugin.Log(f"MODIFIED *** '{event.src_path}'")
+stash.Log(f"MODIFIED *** '{event.src_path}'")
 with mutex:
 shouldUpdate = True
 signal.notify()
 else:
-plugin.TraceOnce(f"Ignoring modifications due to plugin UI setting. path='{event.src_path}'")
+stash.TraceOnce(f"Ignoring modifications due to plugin UI setting. path='{event.src_path}'")

 def on_moved(event):
 global shouldUpdate
 global TargetPaths
 TargetPaths.append(event.src_path)
 TargetPaths.append(event.dest_path)
-plugin.Log(f"MOVE *** from '{event.src_path}' to '{event.dest_path}'")
+stash.Log(f"MOVE *** from '{event.src_path}' to '{event.dest_path}'")
 with mutex:
 shouldUpdate = True
 signal.notify()
@@ -283,13 +293,13 @@ def start_library_monitor():
 global shouldUpdate
 global TargetPaths
 if SCAN_ON_ANY_EVENT or event.src_path == SPECIAL_FILE_DIR:
-plugin.Log(f"Any-Event *** '{event.src_path}'")
+stash.Log(f"Any-Event *** '{event.src_path}'")
 TargetPaths.append(event.src_path)
 with mutex:
 shouldUpdate = True
 signal.notify()
 else:
-plugin.TraceOnce("Ignoring on_any_event trigger.")
+stash.TraceOnce("Ignoring on_any_event trigger.")

 event_handler.on_created = on_created
 event_handler.on_deleted = on_deleted
@@ -302,87 +312,87 @@ def start_library_monitor():
 # Iterate through stashPaths
 for path in stashPaths:
 observer.schedule(event_handler, path, recursive=RECURSIVE)
-plugin.Log(f"Observing {path}")
+stash.Log(f"Observing {path}")
 observer.schedule(event_handler, SPECIAL_FILE_DIR, recursive=RECURSIVE)
-plugin.Trace(f"Observing FileMonitor path {SPECIAL_FILE_DIR}")
+stash.Trace(f"Observing FileMonitor path {SPECIAL_FILE_DIR}")
 observer.start()
 JobIsRunning = False
 PutPluginBackOnTaskQueAndExit = False
-plugin.Trace("Starting loop")
+stash.Trace("Starting loop")
 try:
 while True:
 TmpTargetPaths = []
 with mutex:
 while not shouldUpdate:
-plugin.Trace("While not shouldUpdate")
+stash.Trace("While not shouldUpdate")
-if plugin.CALLED_AS_STASH_PLUGIN and isJobWaitingToRun():
+if stash.CALLED_AS_STASH_PLUGIN and isJobWaitingToRun():
 if FileMonitorPluginIsOnTaskQue:
-plugin.Log(f"Another task (JobID={JobIdInTheQue}) is waiting on the queue. Will restart FileMonitor to allow other task to run.")
+stash.Log(f"Another task (JobID={JobIdInTheQue}) is waiting on the queue. Will restart FileMonitor to allow other task to run.")
 JobIsRunning = True
 break
 else:
-plugin.Warn("Not restarting because FileMonitor is no longer on Task Queue")
+stash.Warn("Not restarting because FileMonitor is no longer on Task Queue")
 if shm_buffer[0] != CONTINUE_RUNNING_SIG:
-plugin.Log(f"Breaking out of loop. (shm_buffer[0]={shm_buffer[0]})")
+stash.Log(f"Breaking out of loop. (shm_buffer[0]={shm_buffer[0]})")
 break
-if plugin.pluginSettings['turnOnScheduler']:
+if stash.pluginSettings['turnOnScheduler']:
 stashScheduler.checkSchedulePending()
-plugin.LogOnce("Waiting for a file change-trigger.")
+stash.LogOnce("Waiting for a file change-trigger.")
 signal.wait(timeout=SIGNAL_TIMEOUT)
-if plugin.pluginSettings['turnOnScheduler'] and not shouldUpdate:
+if stash.pluginSettings['turnOnScheduler'] and not shouldUpdate:
-plugin.Trace("Checking the scheduler.")
+stash.Trace("Checking the scheduler.")
 elif shouldUpdate:
-plugin.Trace("File change trigger occurred.")
+stash.Trace("File change trigger occurred.")
 else:
-plugin.Trace("Wait timeourt occurred.")
+stash.Trace("Wait timeout occurred.")
 shouldUpdate = False
 TmpTargetPaths = []
 for TargetPath in TargetPaths:
 TmpTargetPaths.append(os.path.dirname(TargetPath))
-plugin.Trace(f"Added Path {os.path.dirname(TargetPath)}")
+stash.Trace(f"Added Path {os.path.dirname(TargetPath)}")
 if TargetPath == SPECIAL_FILE_NAME:
 if os.path.isfile(SPECIAL_FILE_NAME):
 shm_buffer[0] = STOP_RUNNING_SIG
-plugin.Log(f"[SpFl]Detected trigger file to kill FileMonitor. {SPECIAL_FILE_NAME}", printTo = plugin.LOG_TO_FILE + plugin.LOG_TO_CONSOLE + plugin.LOG_TO_STASH)
+stash.Log(f"[SpFl]Detected trigger file to kill FileMonitor. {SPECIAL_FILE_NAME}", printTo = stash.LOG_TO_FILE + stash.LOG_TO_CONSOLE + stash.LOG_TO_STASH)
 else:
-plugin.Trace(f"[SpFl]Did not find file {SPECIAL_FILE_NAME}.")
+stash.Trace(f"[SpFl]Did not find file {SPECIAL_FILE_NAME}.")

 # Make sure special file does not exist, incase change was missed.
 if CREATE_SPECIAL_FILE_TO_EXIT and os.path.isfile(SPECIAL_FILE_NAME) and shm_buffer[0] == CONTINUE_RUNNING_SIG:
 shm_buffer[0] = STOP_RUNNING_SIG
-plugin.Log(f"[SpFl]Detected trigger file to kill FileMonitor. {SPECIAL_FILE_NAME}", printTo = plugin.LOG_TO_FILE + plugin.LOG_TO_CONSOLE + plugin.LOG_TO_STASH)
+stash.Log(f"[SpFl]Detected trigger file to kill FileMonitor. {SPECIAL_FILE_NAME}", printTo = stash.LOG_TO_FILE + stash.LOG_TO_CONSOLE + stash.LOG_TO_STASH)
 TargetPaths = []
 TmpTargetPaths = list(set(TmpTargetPaths))
 if TmpTargetPaths != []:
-plugin.Log(f"Triggering Stash scan for path(s) {TmpTargetPaths}")
+stash.Log(f"Triggering Stash scan for path(s) {TmpTargetPaths}")
 if len(TmpTargetPaths) > 1 or TmpTargetPaths[0] != SPECIAL_FILE_DIR:
-if not plugin.DRY_RUN:
+if not stash.DRY_RUN:
-plugin.STASH_INTERFACE.metadata_scan(paths=TmpTargetPaths)
+stash.metadata_scan(paths=TmpTargetPaths)
 if RUN_CLEAN_AFTER_DELETE and RunCleanMetadata:
-plugin.STASH_INTERFACE.metadata_clean(paths=TmpTargetPaths, dry_run=plugin.DRY_RUN)
+stash.metadata_clean(paths=TmpTargetPaths, dry_run=stash.DRY_RUN)
 if RUN_GENERATE_CONTENT:
-plugin.STASH_INTERFACE.metadata_generate()
+stash.metadata_generate()
-if plugin.CALLED_AS_STASH_PLUGIN and shm_buffer[0] == CONTINUE_RUNNING_SIG and FileMonitorPluginIsOnTaskQue:
+if stash.CALLED_AS_STASH_PLUGIN and shm_buffer[0] == CONTINUE_RUNNING_SIG and FileMonitorPluginIsOnTaskQue:
 PutPluginBackOnTaskQueAndExit = True
 else:
-plugin.Trace("Nothing to scan.")
+stash.Trace("Nothing to scan.")

 if shm_buffer[0] != CONTINUE_RUNNING_SIG or StopLibraryMonitorWaitingInTaskQueue:
-plugin.Log(f"Exiting Change File Monitor. (shm_buffer[0]={shm_buffer[0]}) (StopLibraryMonitorWaitingInTaskQueue={StopLibraryMonitorWaitingInTaskQueue})")
+stash.Log(f"Exiting Change File Monitor. (shm_buffer[0]={shm_buffer[0]}) (StopLibraryMonitorWaitingInTaskQueue={StopLibraryMonitorWaitingInTaskQueue})")
 shm_a.close()
 shm_a.unlink() # Call unlink only once to release the shared memory
 raise KeyboardInterrupt
 elif JobIsRunning or PutPluginBackOnTaskQueAndExit:
-plugin.STASH_INTERFACE.run_plugin_task(plugin_id=plugin.PLUGIN_ID, task_name=StartFileMonitorAsAPluginTaskName)
+stash.run_plugin_task(plugin_id=stash.PLUGIN_ID, task_name=StartFileMonitorAsAPluginTaskName)
-plugin.Trace(f"Exiting plugin so that other task can run. (JobIsRunning={JobIsRunning}) (PutPluginBackOnTaskQueAndExit={PutPluginBackOnTaskQueAndExit})")
+stash.Trace(f"Exiting plugin so that other task can run. (JobIsRunning={JobIsRunning}) (PutPluginBackOnTaskQueAndExit={PutPluginBackOnTaskQueAndExit})")
 return
 except KeyboardInterrupt:
 observer.stop()
-plugin.Trace("Stopping observer")
+stash.Trace("Stopping observer")
 if os.path.isfile(SPECIAL_FILE_NAME):
 os.remove(SPECIAL_FILE_NAME)
 observer.join()
-plugin.Trace("Exiting function")
+stash.Trace("Exiting function")

 # Example: python filemonitor.py --stop
 def stop_library_monitor():
@@ -392,18 +402,18 @@ def stop_library_monitor():
 pathlib.Path(SPECIAL_FILE_NAME).touch()
 if DELETE_SPECIAL_FILE_ON_STOP:
 os.remove(SPECIAL_FILE_NAME)
-plugin.Trace("Opening shared memory map.")
+stash.Trace("Opening shared memory map.")
 try:
 shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=False, size=4)
 except:
-# If FileMonitor is running as plugin, then it's expected behavior that SharedMemory will not be avialable.
+# If FileMonitor is running as plugin, then it's expected behavior that SharedMemory will not be available.
-plugin.Trace(f"Could not open shared memory map ({SHAREDMEMORY_NAME}). Change File Monitor must not be running.")
+stash.Trace(f"Could not open shared memory map ({SHAREDMEMORY_NAME}). Change File Monitor must not be running.")
 return
 type(shm_a.buf)
 shm_buffer = shm_a.buf
 len(shm_buffer)
 shm_buffer[0] = STOP_RUNNING_SIG
-plugin.Trace(f"Shared memory map opended, and flag set to {shm_buffer[0]}")
+stash.Trace(f"Shared memory map opended, and flag set to {shm_buffer[0]}")
 shm_a.close()
 shm_a.unlink() # Call unlink only once to release the shared memory

@@ -413,29 +423,29 @@ def start_library_monitor_service():
|
|||||||
shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=False, size=4)
|
shm_a = shared_memory.SharedMemory(name=SHAREDMEMORY_NAME, create=False, size=4)
|
||||||
shm_a.close()
|
shm_a.close()
|
||||||
shm_a.unlink()
|
shm_a.unlink()
|
||||||
plugin.Error("FileMonitor is already running. Need to stop FileMonitor before trying to start it again.")
|
stash.Error("FileMonitor is already running. Need to stop FileMonitor before trying to start it again.")
|
||||||
return
|
return
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
plugin.Trace("FileMonitor is not running, so it's safe to start it as a service.")
|
stash.Trace("FileMonitor is not running, so it's safe to start it as a service.")
|
||||||
args = [f"{pathlib.Path(__file__).resolve().parent}{os.sep}filemonitor.py", '--url', f"{plugin.STASH_URL}"]
|
args = [f"{pathlib.Path(__file__).resolve().parent}{os.sep}filemonitor.py", '--url', f"{stash.STASH_URL}"]
|
||||||
plugin.ExecutePythonScript(args)
|
stash.ExecutePythonScript(args)
|
||||||
|
|
||||||
if parse_args.stop or parse_args.restart or plugin.PLUGIN_TASK_NAME == "stop_library_monitor":
|
if parse_args.stop or parse_args.restart or stash.PLUGIN_TASK_NAME == "stop_library_monitor":
|
||||||
stop_library_monitor()
|
stop_library_monitor()
|
||||||
if parse_args.restart:
|
if parse_args.restart:
|
||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
plugin.STASH_INTERFACE.run_plugin_task(plugin_id=plugin.PLUGIN_ID, task_name=StartFileMonitorAsAPluginTaskName)
|
stash.run_plugin_task(plugin_id=stash.PLUGIN_ID, task_name=StartFileMonitorAsAPluginTaskName)
|
||||||
plugin.Trace(f"Restart FileMonitor EXIT")
|
stash.Trace(f"Restart FileMonitor EXIT")
|
||||||
else:
|
else:
|
||||||
plugin.Trace(f"Stop FileMonitor EXIT")
|
stash.Trace(f"Stop FileMonitor EXIT")
|
||||||
elif plugin.PLUGIN_TASK_NAME == "start_library_monitor_service":
|
elif stash.PLUGIN_TASK_NAME == "start_library_monitor_service":
|
||||||
start_library_monitor_service()
|
start_library_monitor_service()
|
||||||
plugin.Trace(f"start_library_monitor_service EXIT")
|
stash.Trace(f"start_library_monitor_service EXIT")
|
||||||
elif plugin.PLUGIN_TASK_NAME == "start_library_monitor" or not plugin.CALLED_AS_STASH_PLUGIN:
|
elif stash.PLUGIN_TASK_NAME == "start_library_monitor" or not stash.CALLED_AS_STASH_PLUGIN:
|
||||||
start_library_monitor()
|
start_library_monitor()
|
||||||
plugin.Trace(f"start_library_monitor EXIT")
|
stash.Trace(f"start_library_monitor EXIT")
|
||||||
else:
|
else:
|
||||||
plugin.Log(f"Nothing to do!!! (plugin.PLUGIN_TASK_NAME={plugin.PLUGIN_TASK_NAME})")
|
stash.Log(f"Nothing to do!!! (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})")
|
||||||
|
|
||||||
plugin.Trace("\n*********************************\nEXITING ***********************\n*********************************")
|
stash.Trace("\n*********************************\nEXITING ***********************\n*********************************")
|
||||||
|
|||||||
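The two hunks above implement the stop/start handshake through a one-byte shared-memory flag: the stop path attaches to the existing segment and writes STOP_RUNNING_SIG, while the service-start path treats a successful attach as proof that FileMonitor is already running. The following is a minimal sketch of that pattern, assuming an illustrative segment name and signal value (the real constants are defined elsewhere in filemonitor.py):

```python
# Minimal sketch of the shared-memory stop flag; names and values are illustrative.
import time
from multiprocessing import shared_memory

SEGMENT_NAME = "filemonitor_flag"   # assumed; the plugin defines SHAREDMEMORY_NAME
STOP_RUNNING_SIG = 32               # assumed signal value

def monitor_loop():
    # The running monitor owns the segment and polls byte 0 for the stop signal.
    shm = shared_memory.SharedMemory(name=SEGMENT_NAME, create=True, size=4)
    try:
        while shm.buf[0] != STOP_RUNNING_SIG:
            time.sleep(1)           # placeholder for watchdog / scheduler work
    finally:
        shm.close()
        shm.unlink()                # unlink exactly once to release the segment

def request_stop():
    # A second process attaches to the existing segment and sets the flag.
    shm = shared_memory.SharedMemory(name=SEGMENT_NAME, create=False)
    shm.buf[0] = STOP_RUNNING_SIG
    shm.close()
```

The service-start path uses the same attach call inside a try/except: if the attach succeeds, another instance must already own the segment, so it reports the error and returns instead of starting a second monitor.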
@@ -3,22 +3,22 @@
 # Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/FileMonitor
 config = {
 # The task scheduler list.
-# Task can be scheduled to run monthly, weekly, hourly, and by minutes. For best results use the scheduler with FileMonitor running as a service.
+# Task can be scheduled to run monthly, weekly, daily, hourly, and by minutes. For best results use the scheduler with FileMonitor running as a service.
-# The frequency field can be in minutes or hours. A zero frequency value disables the task.
+# For daily, weekly, and monthly task, use the weekday syntax.
-# Note: Both seconds and days are also supported for the frequency field.
+# The [Auto Tag] task is an example of a daily scheduled task.
-# However, seconds is mainly used for test purposes.
+# The [Generate] task is an example of a weekly scheduled task.
-# And days usage is discourage, because it only works if FileMonitor is running for X many days non-stop.
+# The [Backup] task is an example of a monthly scheduled task.
-# For weekly and monthly task, use the syntax as done in the **Generate** and **Backup** task below.
+# Note: The hour section in time MUST be a two digit number, and use military time format. Example: 1PM = "13:00" and 1AM = "01:00"
 "task_scheduler": [
-{"task" : "Auto Tag", "hours" : 24}, # Auto Tag -> [Auto Tag] (Daily)
+# To create a daily task, include each day of the week for the weekday field.
-{"task" : "Clean", "hours" : 48}, # Maintenance -> [Clean] (every 2 days)
+{"task" : "Auto Tag", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "06:00"}, # Auto Tag -> [Auto Tag] (Daily at 6AM)
-{"task" : "Clean Generated Files", "hours" : 48}, # Maintenance -> [Clean Generated Files] (every 2 days)
+{"task" : "Optimise Database", "weekday" : "monday,tuesday,wednesday,thursday,friday,saturday,sunday", "time" : "07:00"}, # Maintenance -> [Optimise Database] (Daily at 7AM)
-{"task" : "Optimise Database", "hours" : 24}, # Maintenance -> [Optimise Database] (Daily)

-# The following is the syntax used for plugins. A plugin task requires the plugin name for the [task] field, and the plugin-ID for the [pluginId] field.
+# The following task are scheduled for 3 days out of the week.
-{"task" : "Create Tags", "pluginId" : "pathParser", "hours" : 24}, # This task requires plugin [Path Parser]. To enable this task change the zero to a positive number.
+{"task" : "Clean", "weekday" : "monday,wednesday,friday", "time" : "08:00"}, # Maintenance -> [Clean] (3 days per week at 8AM)
+{"task" : "Clean Generated Files", "weekday" : "tuesday,thursday,saturday", "time" : "08:00"}, # Maintenance -> [Clean Generated Files] (3 days per week at 8AM)

-# Note: For a weekly task use the weekday method which is more reliable. The hour section in time MUST be a two digit number, and use military time format. Example: 1PM = "13:00"
+# The following task are scheduled weekly
 {"task" : "Generate", "weekday" : "sunday", "time" : "07:00"}, # Generated Content-> [Generate] (Every Sunday at 7AM)
 {"task" : "Scan", "weekday" : "sunday", "time" : "03:00"}, # Library -> [Scan] (Weekly) (Every Sunday at 3AM)

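The rewritten comments above switch daily and weekly tasks to a "weekday" list plus a two-digit, 24-hour "time" string. A rough sketch of how such an entry can be matched against the current moment is shown below; the helper name and matching logic are assumptions for illustration, not FileMonitor's actual scheduler code:

```python
# Minimal sketch of matching a weekday/time task entry (illustrative only).
from datetime import datetime

task = {"task": "Auto Tag",
        "weekday": "monday,tuesday,wednesday,thursday,friday,saturday,sunday",
        "time": "06:00"}

def is_due(task, now=None):
    """Return True when 'now' falls on a listed weekday at the scheduled HH:MM."""
    now = now or datetime.now()
    weekdays = [d.strip().lower() for d in task.get("weekday", "").split(",")]
    today = now.strftime("%A").lower()          # e.g. "monday"
    return today in weekdays and now.strftime("%H:%M") == task.get("time")

print(is_due(task, datetime(2024, 8, 12, 6, 0)))   # Monday 06:00 -> True
```

In practice a scheduler would run such a check on every timeOut polling pass and would also need to guard against firing the same task twice within the scheduled minute; those details are left out of the sketch.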
@@ -28,13 +28,21 @@ config = {
 # 2 = 2nd specified weekday of the month. Example 2nd monday of the month.
 # 3 = 3rd specified weekday of the month.
 # 4 = 4th specified weekday of the month.
-# Example monthly method.
+# The following task is scheduled monthly
 {"task" : "Backup", "weekday" : "sunday", "time" : "01:00", "monthly" : 2}, # Backup -> [Backup] 2nd sunday of the month at 1AM (01:00)
-# {"task" : "Backup", "seconds" : 30}, # Example commented out test task.

+# The above weekday method is the more reliable method to schedule task, because it doesn't rely on FileMonitor running continuously (non-stop).
+
+# The below examples use frequency field method which can work with minutes and hours. A zero frequency value disables the task.
+# Note: Both seconds and days are also supported for the frequency field.
+# However, seconds is mainly used for test purposes.
+# And days usage is discourage, because it only works if FileMonitor is running for X many days non-stop.
 # Note:
 # The below example tasks are done using hours and minutes because the task is easily disabled (deactivated) by a zero value entry.
-# Any of these task types can be converted to a weekly/monthly sysntax.
+# Any of these task types can be converted to a daily, weekly, or monthly syntax.

+# The following is the syntax used for plugins. A plugin task requires the plugin name for the [task] field, and the plugin-ID for the [pluginId] field.
+{"task" : "Create Tags", "pluginId" : "pathParser", "hours" : 0}, # This task requires plugin [Path Parser]. To enable this task change the zero to a positive number.

 # Example task for calling another Stash plugin, which needs plugin name and plugin ID.
 {"task" : "PluginButtonName_Here", "pluginId" : "PluginId_Here", "hours" : 0}, # The zero frequency value makes this task disabled.
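The "monthly" field above narrows a weekday entry to the Nth occurrence of that weekday in the month (for example, "monthly" : 2 with "sunday" means the second Sunday). A small sketch of that calculation, again as an illustration rather than the plugin's own code:

```python
# Minimal sketch of the "monthly" field check: run only on the Nth listed
# weekday of the month (e.g. "monthly": 2 with "sunday" = 2nd Sunday).
from datetime import date

def is_nth_weekday(today, weekday_name, nth):
    """True if 'today' is the nth occurrence of weekday_name in its month."""
    if today.strftime("%A").lower() != weekday_name.lower():
        return False
    occurrence = (today.day - 1) // 7 + 1   # days 1..7 -> 1st, 8..14 -> 2nd, ...
    return occurrence == nth

print(is_nth_weekday(date(2024, 8, 11), "sunday", 2))  # 2nd Sunday of Aug 2024 -> True
```

The same entry still needs the weekday and time checks shown earlier; "monthly" only adds the occurrence test on top of them.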
@@ -47,6 +55,9 @@ config = {

 # Example task to execute a command
 {"task" : "execute", "command" : "C:\\MyPath\\HelloWorld.bat", "args" : "", "hours" : 0},
+
+# Commented out test task.
+# {"task" : "Backup", "seconds" : 30},
 ],

 # Timeout in seconds. This is how often FileMonitor will check the scheduler and (in-plugin mode) check if another job (Task) is in the queue.
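The "execute" entry above runs an arbitrary command on its schedule. A plausible dispatch for such an entry might look like the sketch below; the field handling and the use of subprocess are assumptions, since the diff does not show how FileMonitor actually launches the command:

```python
# Hypothetical dispatch for an "execute" task entry (not taken from the plugin).
import subprocess

def run_execute_task(task):
    cmd = [task["command"]]
    if task.get("args"):
        cmd.append(task["args"])
    # Popen keeps the scheduler loop from blocking while the command runs.
    # Note: on Windows, .bat/.cmd files may require shell=True instead.
    return subprocess.Popen(cmd)

# Example: run_execute_task({"task": "execute", "command": "C:\\MyPath\\HelloWorld.bat", "args": ""})
```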
@@ -68,7 +79,7 @@ config = {
 "scanModified": False, # Warning: Enabling this in Windows OS may cause excessive triggers when user is only viewing directory content.
 # Enable to exit FileMonitor by creating special file in plugin folder\working
 "createSpecFileToExit": True,
-# Enable to delete special file imediately after it's created in stop process.
+# Enable to delete special file immediately after it's created in stop process.
 "deleteSpecFileInStop": False,

 # When enabled, if CREATE flag is triggered, DupFileManager task is called if the plugin is installed.
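The createSpecFileToExit and deleteSpecFileInStop options above describe a second shutdown path: drop a special file into the plugin's working folder and let the file watcher react to the create event. A minimal sketch of what the stop side could do, with an assumed file name and location since the diff does not show them:

```python
# Hypothetical special-exit-file helper; the real file name and path are not in this diff.
import pathlib

SPEC_FILE = pathlib.Path(__file__).resolve().parent / "working" / "exit_filemonitor"  # assumed

def request_exit_via_file(delete_after=False):
    SPEC_FILE.parent.mkdir(parents=True, exist_ok=True)
    SPEC_FILE.touch()                     # the watchdog sees the CREATE event and shuts down
    if delete_after:                      # mirrors "deleteSpecFileInStop": True
        SPEC_FILE.unlink(missing_ok=True)
```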