Added option to include paths for scheduled task

Added rename_generated_files
David Maisonave
2024-08-15 14:50:57 -04:00
parent bbeb0291da
commit 8a23918249
9 changed files with 643 additions and 276 deletions

View File

@@ -7,160 +7,43 @@
# Research:
# Research following links to complete this plugin:
# https://github.com/WithoutPants/stash-plugin-duplicate-finder
#
# Look at options in programs from the following link:
# https://video.stackexchange.com/questions/25302/how-can-i-find-duplicate-videos-by-content
#
# Python library for parsing reparse points: parse-reparsepoint
# https://pypi.org/project/parse-reparsepoint/
# pip install parse-reparsepoint
#
# Look at stash API find_duplicate_scenes
import os
import sys
import time
import shutil
import fileinput
import hashlib
import json
from pathlib import Path
import requests
import logging
from logging.handlers import RotatingFileHandler
import stashapi.log as log # Importing stashapi.log as log for critical events ONLY
from stashapi.stashapp import StashInterface
import os, sys, time, pathlib, argparse, platform
from StashPluginHelper import StashPluginHelper
from DupFileManager_config import config # Import config from DupFileManager_config.py
# **********************************************************************
# Constant global variables --------------------------------------------
LOG_FILE_PATH = f"{Path(__file__).resolve().parent}{os.sep}{Path(__file__).stem}.log"
FORMAT = "[%(asctime)s - LN:%(lineno)s] %(message)s"
PLUGIN_ARGS_MODE = False
PLUGIN_ID = Path(__file__).stem
parser = argparse.ArgumentParser()
parser.add_argument('--url', '-u', dest='stash_url', type=str, help='Add Stash URL')
parser.add_argument('--trace', '-t', dest='trace', action='store_true', help='Enables debug trace mode.')
parser.add_argument('--remove_dup', '-r', dest='remove', action='store_true', help='Remove (delete) duplicate files.')
parser.add_argument('--dryrun', '-d', dest='dryrun', action='store_true', help='Do dryrun for deleting duplicate files. No files are deleted, and only logging occurs.')
parse_args = parser.parse_args()
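# Example command-line usage (values illustrative; adjust the URL to your Stash server):
#   python DupFileManager.py --url http://localhost:9999 --trace
#   python DupFileManager.py --remove_dup --dryrun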
RFH = RotatingFileHandler(
filename=LOG_FILE_PATH,
mode='a',
maxBytes=2*1024*1024, # Configure logging for this script with a max log file size of 2 MB
backupCount=2,
encoding=None,
delay=0
)
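# Note: with maxBytes=2*1024*1024 and backupCount=2, the rotating log occupies at most
# about 6 MB on disk (one active 2 MB file plus two 2 MB backups).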
TIMEOUT = 5
CONTINUE_RUNNING_SIG = 99
# **********************************************************************
# Global variables --------------------------------------------
exitMsg = "Change success!!"
runningInPluginMode = False
# Configure local log file for plugin within plugin folder having a limited max log file size
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt="%y%m%d %H:%M:%S", handlers=[RFH])
logger = logging.getLogger(Path(__file__).stem)
# **********************************************************************
# ----------------------------------------------------------------------
# Code section to fetch variables from Plugin UI and from DupFileManager_settings.py
# Check if being called as Stash plugin
gettingCalledAsStashPlugin = True
mangeDupFilesTask = True
StdInRead = None
try:
if len(sys.argv) == 1:
print(f"Attempting to read stdin. (len(sys.argv)={len(sys.argv)})", file=sys.stderr)
StdInRead = sys.stdin.read()
# for line in fileinput.input():
# StdInRead = line
# break
else:
raise Exception("Not called in plugin mode.")
except:
gettingCalledAsStashPlugin = False
print(f"Either len(sys.argv) not expected value OR sys.stdin.read() failed! (StdInRead={StdInRead}) (len(sys.argv)={len(sys.argv)})", file=sys.stderr)
pass
if gettingCalledAsStashPlugin and StdInRead:
print(f"StdInRead={StdInRead} (len(sys.argv)={len(sys.argv)})", file=sys.stderr)
runningInPluginMode = True
json_input = json.loads(StdInRead)
FRAGMENT_SERVER = json_input["server_connection"]
else:
runningInPluginMode = False
FRAGMENT_SERVER = {'Scheme': config['endpoint_Scheme'], 'Host': config['endpoint_Host'], 'Port': config['endpoint_Port'], 'SessionCookie': {'Name': 'session', 'Value': '', 'Path': '', 'Domain': '', 'Expires': '0001-01-01T00:00:00Z', 'RawExpires': '', 'MaxAge': 0, 'Secure': False, 'HttpOnly': False, 'SameSite': 0, 'Raw': '', 'Unparsed': None}, 'Dir': os.path.dirname(Path(__file__).resolve().parent), 'PluginDir': Path(__file__).resolve().parent}
print("Running in non-plugin mode!", file=sys.stderr)
stash = StashInterface(FRAGMENT_SERVER)
PLUGINCONFIGURATION = stash.get_configuration()["plugins"]
STASHCONFIGURATION = stash.get_configuration()["general"]
STASHPATHSCONFIG = STASHCONFIGURATION['stashes']
stashPaths = []
settings = {
"ignoreReparsepoints": True,
"ignoreSymbolicLinks": True,
"mergeDupFilename": True,
"moveToTrashCan": False,
"whitelist": [],
"zzdebugTracing": False,
"zzdryRun": False,
}
CanUpdatePluginConfigSettings = False
try:
plugins_configuration = stash.find_plugins_config()
CanUpdatePluginConfigSettings = True
except Exception as e:
logger.exception('Got exception on main handler')
logger.error('This exception most likely occurred because stashapp-tools needs to be upgraded. To fix this error, run the following command:\npip install --upgrade stashapp-tools')
pass
stash = StashPluginHelper(
stash_url=parse_args.stash_url,
debugTracing=parse_args.trace,
settings=settings,
config=config
)
stash.Status()
stash.Log(f"\nStarting (__file__={__file__}) (stash.CALLED_AS_STASH_PLUGIN={stash.CALLED_AS_STASH_PLUGIN}) (stash.DEBUG_TRACING={stash.DEBUG_TRACING}) (stash.DRY_RUN={stash.DRY_RUN}) (stash.PLUGIN_TASK_NAME={stash.PLUGIN_TASK_NAME})************************************************")
if PLUGIN_ID in PLUGINCONFIGURATION and (not CanUpdatePluginConfigSettings or 'INITIAL_VALUES_SET1' in PLUGINCONFIGURATION[PLUGIN_ID]):
settings.update(PLUGINCONFIGURATION[PLUGIN_ID])
# ----------------------------------------------------------------------
debugTracing = settings["zzdebugTracing"]
for item in STASHPATHSCONFIG:
stashPaths.append(item["path"])
# Extract dry_run setting from settings
DRY_RUN = settings["zzdryRun"]
dry_run_prefix = ''
try:
PLUGIN_ARGS_MODE = json_input['args']["mode"]
except:
pass
logger.info(f"\nStarting (runningInPluginMode={runningInPluginMode}) (debugTracing={debugTracing}) (DRY_RUN={DRY_RUN}) (PLUGIN_ARGS_MODE={PLUGIN_ARGS_MODE}) (stash.stash_version()={stash.stash_version()})************************************************")
if debugTracing: logger.info(f"Debug Tracing (stash.get_configuration()={stash.get_configuration()})")
if debugTracing: logger.info("settings: %s " % (settings,))
if debugTracing: logger.info(f"Debug Tracing (STASHCONFIGURATION={STASHCONFIGURATION})")
if debugTracing: logger.info(f"Debug Tracing (stashPaths={stashPaths})")
if debugTracing: logger.info(f"Debug Tracing (PLUGIN_ID={PLUGIN_ID})")
if debugTracing: logger.info(f"Debug Tracing (PLUGINCONFIGURATION={PLUGINCONFIGURATION})")
if PLUGIN_ID in PLUGINCONFIGURATION:
if 'INITIAL_VALUES_SET1' not in PLUGINCONFIGURATION[PLUGIN_ID]:
if debugTracing: logger.info(f"Initializing plugin ({PLUGIN_ID}) settings (PLUGINCONFIGURATION[PLUGIN_ID]={PLUGINCONFIGURATION[PLUGIN_ID]})")
try:
plugins_configuration = stash.find_plugins_config()
if debugTracing: logger.info(f"Debug Tracing (plugins_configuration={plugins_configuration})")
stash.configure_plugin(PLUGIN_ID, {"INITIAL_VALUES_SET1": True})
logger.info('Called stash.configure_plugin(PLUGIN_ID, {"INITIAL_VALUES_SET1": True})')
plugins_configuration = stash.find_plugins_config()
if debugTracing: logger.info(f"Debug Tracing (plugins_configuration={plugins_configuration})")
stash.configure_plugin(PLUGIN_ID, settings)
logger.info('Called stash.configure_plugin(PLUGIN_ID, settings)')
plugins_configuration = stash.find_plugins_config()
if debugTracing: logger.info(f"Debug Tracing (plugins_configuration={plugins_configuration})")
except Exception as e:
logger.exception('Got exception on main handler')
try:
if debugTracing: logger.info("Debug Tracing................")
stash.configure_plugin(plugin_id=PLUGIN_ID, values=[{"zzdebugTracing": False}], init_defaults=True)
if debugTracing: logger.info("Debug Tracing................")
except Exception as e:
logger.exception('Got exception on main handler')
pass
pass
# stash.configure_plugin(PLUGIN_ID, settings) # , init_defaults=True
if debugTracing: logger.info("Debug Tracing................")
if DRY_RUN:
logger.info("Dry run mode is enabled.")
dry_run_prefix = "Would've "
if debugTracing: logger.info("Debug Tracing................")
# ----------------------------------------------------------------------
# **********************************************************************
stash.Trace(f"(stashPaths={stash.STASH_PATHS})")
def realpath(path):
"""
@@ -184,60 +67,72 @@ def realpath(path):
def isReparsePoint(path):
import win32api
import win32con
from parse_reparsepoint import Navigator
FinalPathname = realpath(path)
logger.info(f"(path='{path}') (FinalPathname='{FinalPathname}')")
stash.Log(f"(path='{path}') (FinalPathname='{FinalPathname}')")
if FinalPathname != path:
logger.info(f"Symbolic link '{path}'")
stash.Log(f"Symbolic link '{path}'")
return True
if not os.path.isdir(path):
path = os.path.dirname(path)
return win32api.GetFileAttributes(path) & win32con.FILE_ATTRIBUTE_REPARSE_POINT
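# A minimal cross-platform fallback (hypothetical helper, not part of the plugin): it detects
# symbolic links only, not NTFS reparse points, so it is weaker than the win32api check above.
def isSymbolicLinkOnly(path):
    if not os.path.isdir(path):
        path = os.path.dirname(path)
    return os.path.islink(path)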
def mangeDupFiles():
import platform
if debugTracing: logger.info(f"Debug Tracing (platform.system()={platform.system()})")
def mangeDupFiles(merge=False, deleteDup=False, DryRun=False):
stash.Trace(f"Debug Tracing (platform.system()={platform.system()})")
myTestPath1 = r"B:\V\V\Tip\POV - Holly Molly petite ginger anal slut - RedTube.mp4" # not a reparse point or symbolic link
myTestPath2 = r"B:\_\SpecialSet\Amateur Anal Attempts\BRCC test studio name.m2ts" # reparse point
myTestPath3 = r"B:\_\SpecialSet\Amateur Anal Attempts\Amateur Anal Attempts 4.mp4" #symbolic link
myTestPath4 = r"E:\Stash\plugins\RenameFile\README.md" #symbolic link
myTestPath5 = r"E:\_\David-Maisonave\Axter-Stash\plugins\RenameFile\README.md" #symbolic link
myTestPath6 = r"E:\_\David-Maisonave\Axter-Stash\plugins\DeleteMe\Renamer\README.md" # not reparse point
logger.info(f"Testing '{myTestPath1}'")
stash.Log(f"Testing '{myTestPath1}'")
if isReparsePoint(myTestPath1):
logger.info(f"isSymLink '{myTestPath1}'")
stash.Log(f"isSymLink '{myTestPath1}'")
else:
logger.info(f"Not isSymLink '{myTestPath1}'")
stash.Log(f"Not isSymLink '{myTestPath1}'")
if isReparsePoint(myTestPath2):
logger.info(f"isSymLink '{myTestPath2}'")
stash.Log(f"isSymLink '{myTestPath2}'")
else:
logger.info(f"Not isSymLink '{myTestPath2}'")
stash.Log(f"Not isSymLink '{myTestPath2}'")
if isReparsePoint(myTestPath3):
logger.info(f"isSymLink '{myTestPath3}'")
stash.Log(f"isSymLink '{myTestPath3}'")
else:
logger.info(f"Not isSymLink '{myTestPath3}'")
stash.Log(f"Not isSymLink '{myTestPath3}'")
if isReparsePoint(myTestPath4):
logger.info(f"isSymLink '{myTestPath4}'")
stash.Log(f"isSymLink '{myTestPath4}'")
else:
logger.info(f"Not isSymLink '{myTestPath4}'")
stash.Log(f"Not isSymLink '{myTestPath4}'")
if isReparsePoint(myTestPath5):
logger.info(f"isSymLink '{myTestPath5}'")
stash.Log(f"isSymLink '{myTestPath5}'")
else:
logger.info(f"Not isSymLink '{myTestPath5}'")
stash.Log(f"Not isSymLink '{myTestPath5}'")
if isReparsePoint(myTestPath6):
logger.info(f"isSymLink '{myTestPath6}'")
stash.Log(f"isSymLink '{myTestPath6}'")
else:
logger.info(f"Not isSymLink '{myTestPath6}'")
stash.Log(f"Not isSymLink '{myTestPath6}'")
return
if mangeDupFilesTask:
mangeDupFiles()
if debugTracing: logger.info(f"stop_library_monitor EXIT................")
if stash.PLUGIN_TASK_NAME == "merge_dup_filename_task":
mangeDupFiles(merge=True)
stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT")
elif stash.PLUGIN_TASK_NAME == "delete_duplicates":
mangeDupFiles(deleteDup=True)
stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT")
elif stash.PLUGIN_TASK_NAME == "dryrun_delete_duplicates":
mangeDupFiles(deleteDup=True, DryRun=True)
stash.Trace(f"{stash.PLUGIN_TASK_NAME} EXIT")
elif parse_args.remove:
mangeDupFiles(deleteDup=True, DryRun=parse_args.dryrun)
stash.Trace(f"Delete duplicate (DryRun={parse_args.dryrun}) EXIT")
elif parse_args.dryrun:
mangeDupFiles(deleteDup=True, DryRun=parse_args.dryrun)
stash.Trace(f"Dryrun delete duplicate EXIT")
else:
logger.info(f"Nothing to do!!! (PLUGIN_ARGS_MODE={PLUGIN_ARGS_MODE})")
stash.Log(f"Nothing to do!!! (PLUGIN_ARGS_MODE={PLUGIN_ARGS_MODE})")
if debugTracing: logger.info("\n*********************************\nEXITING ***********************\n*********************************")
stash.Trace("\n*********************************\nEXITING ***********************\n*********************************")

View File

@@ -3,14 +3,6 @@ description: Manages duplicate files.
version: 0.1.0
url: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager
settings:
ignoreReparsepoints:
displayName: Ignore Reparse Points
description: Enable to ignore reparse-points when deleting duplicates.
type: BOOLEAN
ignoreSymbolicLinks:
displayName: Ignore Symbolic Links
description: Enable to ignore symbolic links when deleting duplicates.
type: BOOLEAN
mergeDupFilename:
displayName: Before deletion, merge the duplicate file's name as a potential source for tag names, performers, and studios.
description: Enable to
@@ -19,6 +11,10 @@ settings:
displayName: Trash Can
description: Enable to move files to trash can instead of permanently delete file.
type: BOOLEAN
whitelist:
displayName: White List
description: A comma separated list of preferential paths, in order of preference, used to determine which duplicate should be the primary (e.g. C:\SomeMediaPath\subpath,E:\YetAnotherPath\subpath).
type: STRING
zzdebugTracing:
displayName: Debug Tracing
description: (Default=false) [***For Advanced Users***] Enable debug tracing. When enabled, additional trace logging is written to Stash\plugins\DupFileManager\DupFileManager.log

View File

@@ -2,16 +2,41 @@
# By David Maisonave (aka Axter) Jul-2024 (https://www.axter.com/)
# Get the latest developers version from following link: https://github.com/David-Maisonave/Axter-Stash/tree/main/plugins/DupFileManager
config = {
# Define white list of preferential paths to determine which duplicate should be the primary.
"whitelist_paths": [], #Example: "whitelist_paths": ['C:\SomeMediaPath\subpath', 'E:\YetAnotherPath\subpath', 'E:\YetAnotherPath\secondSubPath']
# Define black list to determine which duplicates should be deleted first.
"blacklist_paths": [], #Example: "blacklist_paths": ['C:\SomeMediaPath\subpath', 'E:\YetAnotherPath\subpath', 'E:\YetAnotherPath\secondSubPath']
"blacklist_paths": [], #Example: "blacklist_paths": ['C:\\SomeMediaPath\\subpath', "E:\\YetAnotherPath\\subpath', "E:\\YetAnotherPath\\secondSubPath']
# If enabled, ignore reparsepoints. For Windows NT drives only.
"ignoreReparsepoints" : True,
# If enabled, ignore symbolic links.
"ignoreSymbolicLinks" : True,
# If enabled, swap higher resolution duplicate files to preferred path.
"swapHighRes" : True,
# If enabled, swap longer length media files to preferred path. Longer is determined by the significantLongerTime value.
"swapLongLength" : True,
# If enabled, swap longer file name to preferred path.
"swapLongFileName" : False,
# If enabled, when finding exact duplicate files, keep the file with the shorter name. The default is to keep the file with the longer name.
"keepShorterFileName" : False,
# If enabled, when finding duplicate files, keep media with the shorter time length. The default is to keep media with the longer time length.
"keepShorterLength" : False,
# If enabled, when finding duplicate files, keep media with the lower resolution. The default is to keep media with higher resolution.
"keepLowerResolution" : False,
# If enabled, keep duplicate media with high resolution over media with a significantly longer time.
"keepHighResOverLen" : False, # Requires keepBothHighResAndLongerLen = False
# The threshold percentage that is considered a significantly longer time. Default is 15% longer.
"significantLongerTime" : 15, # 15% longer time
# If enabled, keep both duplicate files if the LOWER resolution file is significantly longer.
"keepBothHighResAndLongerLen" : True,
# Define ignore list to avoid specific directories. No action is taken on any file in the ignore list.
"ignore_paths": [], #Example: "ignore_paths": ['C:\SomeMediaPath\subpath', 'E:\YetAnotherPath\subpath', 'E:\YetAnotherPath\secondSubPath']
"ignore_paths": [], #Example: "ignore_paths": ['C:\\SomeMediaPath\\subpath', "E:\\YetAnotherPath\\subpath', "E:\\YetAnotherPath\\secondSubPath']
# Keep empty to check all paths, or populate it with the only paths to check for duplicates
"onlyCheck_paths": [], #Example: "onlyCheck_paths": ['C:\SomeMediaPath\subpath', 'E:\YetAnotherPath\subpath', 'E:\YetAnotherPath\secondSubPath']
"onlyCheck_paths": [], #Example: "onlyCheck_paths": ['C:\\SomeMediaPath\\subpath', "E:\\YetAnotherPath\\subpath', "E:\\YetAnotherPath\\secondSubPath']
# Alternative path to move duplicate files. Path needs to be in the same drive as the duplicate file.
"dup_path": "", #Example: "C:\TempDeleteFolder"
"dup_path": "", #Example: "C:\\TempDeleteFolder"
# The following fields are ONLY used when running DupFileManager in script mode
"endpoint_Scheme" : "http", # Define endpoint to use when contacting the Stash server

View File

@@ -0,0 +1,354 @@
from stashapi.stashapp import StashInterface
from logging.handlers import RotatingFileHandler
import inspect, sys, os, pathlib, logging, json
from stashapi.stash_types import PhashDistance
import __main__
# StashPluginHelper (By David Maisonave aka Axter)
# See end of this file for example usage
# Log Features:
# Can optionally log to multiple outputs on each Log or Trace call.
# Logging includes source code line number
# Sets a maximum plugin log file size
# Stash Interface Features:
# Gets STASH_URL value from command line argument and/or from STDIN_READ
# Sets FRAGMENT_SERVER based on command line arguments or STDIN_READ
# Sets PLUGIN_ID based on the main script file name (in lower case)
# Gets PLUGIN_TASK_NAME value
# Sets pluginSettings (The plugin UI settings)
# Misc Features:
# Gets DRY_RUN value from command line argument and/or from UI and/or from config file
# Gets DEBUG_TRACING value from command line argument and/or from UI and/or from config file
# Sets RUNNING_IN_COMMAND_LINE_MODE to True if it detects command-line arguments
# Sets CALLED_AS_STASH_PLUGIN to True if it's able to read from STDIN_READ
class StashPluginHelper(StashInterface):
# Primary Members for external reference
PLUGIN_TASK_NAME = None
PLUGIN_ID = None
PLUGIN_CONFIGURATION = None
pluginSettings = None
pluginConfig = None
STASH_INTERFACE_INIT = False
STASH_URL = None
STASH_CONFIGURATION = None
JSON_INPUT = None
DEBUG_TRACING = False
DRY_RUN = False
CALLED_AS_STASH_PLUGIN = False
RUNNING_IN_COMMAND_LINE_MODE = False
FRAGMENT_SERVER = None
STASHPATHSCONFIG = None
STASH_PATHS = []
# printTo argument
LOG_TO_FILE = 1
LOG_TO_CONSOLE = 2 # Note: Console output is only visible when running in command line mode. In plugin mode, this output is lost.
LOG_TO_STDERR = 4 # Note: In plugin mode, output to StdErr ALWAYS gets sent to stash logging as an error.
LOG_TO_STASH = 8
LOG_TO_WARN = 16
LOG_TO_ERROR = 32
LOG_TO_CRITICAL = 64
LOG_TO_ALL = LOG_TO_FILE + LOG_TO_CONSOLE + LOG_TO_STDERR + LOG_TO_STASH
# Misc class variables
MAIN_SCRIPT_NAME = None
LOG_LEVEL = logging.INFO
LOG_FILE_DIR = None
LOG_FILE_NAME = None
STDIN_READ = None
pluginLog = None
logLinePreviousHits = []
# Prefix message value
LEV_TRACE = "TRACE: "
LEV_DBG = "DBG: "
LEV_INF = "INF: "
LEV_WRN = "WRN: "
LEV_ERR = "ERR: "
LEV_CRITICAL = "CRITICAL: "
# Default format
LOG_FORMAT = "[%(asctime)s] %(message)s"
# Externally modifiable variables
log_to_err_set = LOG_TO_FILE + LOG_TO_STDERR # This can be changed by the calling source in order to customize what targets get error messages
log_to_norm = LOG_TO_FILE + LOG_TO_CONSOLE # Can be changed so as to set the target output for normal logging
# By default, warning messages are sent to the Stash log.
log_to_wrn_set = LOG_TO_STASH # This can be changed by the calling source in order to customize what targets get warning messages
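# Example (illustrative): the printTo bit flags can be combined to send one message to both
# the plugin log file and the Stash log:
#   self.Log("Scan started", printTo=self.LOG_TO_FILE + self.LOG_TO_STASH)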
def __init__(self,
debugTracing = None, # Set debugTracing to True so as to output debug and trace logging
logFormat = LOG_FORMAT, # Plugin log line format
dateFmt = "%y%m%d %H:%M:%S", # Date format when logging to plugin log file
maxbytes = 2*1024*1024, # Max size of plugin log file
backupcount = 2, # Backup counts when log file size reaches max size
logToWrnSet = 0, # Customize the target output set which will get warning logging
logToErrSet = 0, # Customize the target output set which will get error logging
logToNormSet = 0, # Customize the target output set which will get normal logging
logFilePath = "", # Plugin log file. If empty, the log file name will be set based on current python file name and path
mainScriptName = "", # The main plugin script file name (full path)
pluginID = "",
settings = None, # Default settings for UI fields
config = None, # From pluginName_config.py or pluginName_setting.py
fragmentServer = None,
stash_url = None, # Stash URL (endpoint URL) Example: http://localhost:9999
DebugTraceFieldName = "zzdebugTracing",
DryRunFieldName = "zzdryRun",
setStashLoggerAsPluginLogger = False):
if logToWrnSet: self.log_to_wrn_set = logToWrnSet
if logToErrSet: self.log_to_err_set = logToErrSet
if logToNormSet: self.log_to_norm = logToNormSet
if stash_url and len(stash_url): self.STASH_URL = stash_url
self.MAIN_SCRIPT_NAME = mainScriptName if mainScriptName != "" else __main__.__file__
self.PLUGIN_ID = pluginID if pluginID != "" else pathlib.Path(self.MAIN_SCRIPT_NAME).stem.lower()
# print(f"self.MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME}, self.PLUGIN_ID={self.PLUGIN_ID}", file=sys.stderr)
self.LOG_FILE_NAME = logFilePath if logFilePath != "" else f"{pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}{os.sep}{pathlib.Path(self.MAIN_SCRIPT_NAME).stem}.log"
self.LOG_FILE_DIR = pathlib.Path(self.LOG_FILE_NAME).resolve().parent
RFH = RotatingFileHandler(
filename=self.LOG_FILE_NAME,
mode='a',
maxBytes=maxbytes,
backupCount=backupcount,
encoding=None,
delay=0
)
if fragmentServer:
self.FRAGMENT_SERVER = fragmentServer
else:
self.FRAGMENT_SERVER = {'Scheme': 'http', 'Host': '0.0.0.0', 'Port': '9999', 'SessionCookie': {'Name': 'session', 'Value': '', 'Path': '', 'Domain': '', 'Expires': '0001-01-01T00:00:00Z', 'RawExpires': '', 'MaxAge': 0, 'Secure': False, 'HttpOnly': False, 'SameSite': 0, 'Raw': '', 'Unparsed': None}, 'Dir': os.path.dirname(pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent), 'PluginDir': pathlib.Path(self.MAIN_SCRIPT_NAME).resolve().parent}
if debugTracing: self.DEBUG_TRACING = debugTracing
if config:
self.pluginConfig = config
if DebugTraceFieldName in self.pluginConfig:
self.DEBUG_TRACING = self.pluginConfig[DebugTraceFieldName]
if DryRunFieldName in self.pluginConfig:
self.DRY_RUN = self.pluginConfig[DryRunFieldName]
if len(sys.argv) > 1:
self.RUNNING_IN_COMMAND_LINE_MODE = True
if not debugTracing or not stash_url:
for argValue in sys.argv[1:]:
if argValue.lower() == "--trace":
self.DEBUG_TRACING = True
elif argValue.lower() == "--dry_run" or argValue.lower() == "--dryrun":
self.DRY_RUN = True
elif ":" in argValue and not self.STASH_URL:
self.STASH_URL = argValue
if self.STASH_URL:
endpointUrlArr = self.STASH_URL.split(":")
if len(endpointUrlArr) == 3:
self.FRAGMENT_SERVER['Scheme'] = endpointUrlArr[0]
self.FRAGMENT_SERVER['Host'] = endpointUrlArr[1][2:]
self.FRAGMENT_SERVER['Port'] = endpointUrlArr[2]
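# Example (illustrative): "http://localhost:9999" splits on ":" into Scheme="http",
# Host="localhost" (the [2:] strips the leading "//"), and Port="9999".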
super().__init__(self.FRAGMENT_SERVER)
self.STASH_INTERFACE_INIT = True
else:
try:
self.STDIN_READ = sys.stdin.read()
self.CALLED_AS_STASH_PLUGIN = True
except:
pass
if self.STDIN_READ:
self.JSON_INPUT = json.loads(self.STDIN_READ)
if "args" in self.JSON_INPUT and "mode" in self.JSON_INPUT["args"]:
self.PLUGIN_TASK_NAME = self.JSON_INPUT["args"]["mode"]
self.FRAGMENT_SERVER = self.JSON_INPUT["server_connection"]
self.STASH_URL = f"{self.FRAGMENT_SERVER['Scheme']}://{self.FRAGMENT_SERVER['Host']}:{self.FRAGMENT_SERVER['Port']}"
super().__init__(self.FRAGMENT_SERVER)
self.STASH_INTERFACE_INIT = True
if self.STASH_INTERFACE_INIT:
self.PLUGIN_CONFIGURATION = self.get_configuration()["plugins"]
self.STASH_CONFIGURATION = self.get_configuration()["general"]
self.STASHPATHSCONFIG = self.STASH_CONFIGURATION['stashes']
for item in self.STASHPATHSCONFIG:
self.STASH_PATHS.append(item["path"])
if settings:
self.pluginSettings = settings
if self.PLUGIN_ID in self.PLUGIN_CONFIGURATION:
self.pluginSettings.update(self.PLUGIN_CONFIGURATION[self.PLUGIN_ID])
if DebugTraceFieldName in self.pluginSettings:
self.DEBUG_TRACING = self.pluginSettings[DebugTraceFieldName]
if DryRunFieldName in self.pluginSettings:
self.DRY_RUN = self.pluginSettings[DryRunFieldName]
if self.DEBUG_TRACING: self.LOG_LEVEL = logging.DEBUG
logging.basicConfig(level=self.LOG_LEVEL, format=logFormat, datefmt=dateFmt, handlers=[RFH])
self.pluginLog = logging.getLogger(pathlib.Path(self.MAIN_SCRIPT_NAME).stem)
if setStashLoggerAsPluginLogger:
self.log = self.pluginLog
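# Example construction (values illustrative; see DupFileManager.py above for real usage):
#   stash = StashPluginHelper(settings=settings, config=config, stash_url="http://localhost:9999")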
def Log(self, logMsg, printTo = 0, logLevel = logging.INFO, lineNo = -1, levelStr = "", logAlways = False):
if printTo == 0:
printTo = self.log_to_norm
elif printTo == self.LOG_TO_ERROR and logLevel == logging.INFO:
logLevel = logging.ERROR
printTo = self.log_to_err_set
elif printTo == self.LOG_TO_CRITICAL and logLevel == logging.INFO:
logLevel = logging.CRITICAL
printTo = self.log_to_err_set
elif printTo == self.LOG_TO_WARN and logLevel == logging.INFO:
logLevel = logging.WARN
printTo = self.log_to_wrn_set
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
LN_Str = f"[LN:{lineNo}]"
# print(f"{LN_Str}, {logAlways}, {self.LOG_LEVEL}, {logging.DEBUG}, {levelStr}, {logMsg}")
if logLevel == logging.DEBUG and (logAlways == False or self.LOG_LEVEL == logging.DEBUG):
if levelStr == "": levelStr = self.LEV_DBG
if printTo & self.LOG_TO_FILE: self.pluginLog.debug(f"{LN_Str} {levelStr}{logMsg}")
if printTo & self.LOG_TO_STASH: self.log.debug(f"{LN_Str} {levelStr}{logMsg}")
elif logLevel == logging.INFO or logLevel == logging.DEBUG:
if levelStr == "": levelStr = self.LEV_INF if logLevel == logging.INFO else self.LEV_DBG
if printTo & self.LOG_TO_FILE: self.pluginLog.info(f"{LN_Str} {levelStr}{logMsg}")
if printTo & self.LOG_TO_STASH: self.log.info(f"{LN_Str} {levelStr}{logMsg}")
elif logLevel == logging.WARN:
if levelStr == "": levelStr = self.LEV_WRN
if printTo & self.LOG_TO_FILE: self.pluginLog.warning(f"{LN_Str} {levelStr}{logMsg}")
if printTo & self.LOG_TO_STASH: self.log.warning(f"{LN_Str} {levelStr}{logMsg}")
elif logLevel == logging.ERROR:
if levelStr == "": levelStr = self.LEV_ERR
if printTo & self.LOG_TO_FILE: self.pluginLog.error(f"{LN_Str} {levelStr}{logMsg}")
if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
elif logLevel == logging.CRITICAL:
if levelStr == "": levelStr = self.LEV_CRITICAL
if printTo & self.LOG_TO_FILE: self.pluginLog.critical(f"{LN_Str} {levelStr}{logMsg}")
if printTo & self.LOG_TO_STASH: self.log.error(f"{LN_Str} {levelStr}{logMsg}")
if (printTo & self.LOG_TO_CONSOLE) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
print(f"{LN_Str} {levelStr}{logMsg}")
if (printTo & self.LOG_TO_STDERR) and (logLevel != logging.DEBUG or self.DEBUG_TRACING or logAlways):
print(f"StdErr: {LN_Str} {levelStr}{logMsg}", file=sys.stderr)
def Trace(self, logMsg = "", printTo = 0, logAlways = False, lineNo = -1):
if printTo == 0: printTo = self.LOG_TO_FILE
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
logLev = logging.INFO if logAlways else logging.DEBUG
if self.DEBUG_TRACING or logAlways:
if logMsg == "":
logMsg = f"Line number {lineNo}..."
self.Log(logMsg, printTo, logLev, lineNo, self.LEV_TRACE, logAlways)
# Log once per session. Only logs the first time called from a particular line number in the code.
def TraceOnce(self, logMsg = "", printTo = 0, logAlways = False):
lineNo = inspect.currentframe().f_back.f_lineno
if self.DEBUG_TRACING or logAlways:
FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}"
if FuncAndLineNo in self.logLinePreviousHits:
return
self.logLinePreviousHits.append(FuncAndLineNo)
self.Trace(logMsg, printTo, logAlways, lineNo)
# Log INFO on first call, then do Trace on remaining calls.
def LogOnce(self, logMsg = "", printTo = 0, logAlways = False, traceOnRemainingCalls = True):
if printTo == 0: printTo = self.LOG_TO_FILE
lineNo = inspect.currentframe().f_back.f_lineno
FuncAndLineNo = f"{inspect.currentframe().f_back.f_code.co_name}:{lineNo}"
if FuncAndLineNo in self.logLinePreviousHits:
if traceOnRemainingCalls:
self.Trace(logMsg, printTo, logAlways, lineNo)
else:
self.logLinePreviousHits.append(FuncAndLineNo)
self.Log(logMsg, printTo, logging.INFO, lineNo)
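# Example (illustrative): called inside a per-file loop, LogOnce logs at INFO level the first
# time a given call site runs, then downgrades subsequent hits from the same line to Trace:
#   stash.LogOnce("Processing duplicate candidates...")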
def Warn(self, logMsg, printTo = 0):
if printTo == 0: printTo = self.log_to_wrn_set
lineNo = inspect.currentframe().f_back.f_lineno
self.Log(logMsg, printTo, logging.WARN, lineNo)
def Error(self, logMsg, printTo = 0):
if printTo == 0: printTo = self.log_to_err_set
lineNo = inspect.currentframe().f_back.f_lineno
self.Log(logMsg, printTo, logging.ERROR, lineNo)
def Status(self, printTo = 0, logLevel = logging.INFO, lineNo = -1):
if printTo == 0: printTo = self.log_to_norm
if lineNo == -1:
lineNo = inspect.currentframe().f_back.f_lineno
self.Log(f"StashPluginHelper Status: (CALLED_AS_STASH_PLUGIN={self.CALLED_AS_STASH_PLUGIN}), (RUNNING_IN_COMMAND_LINE_MODE={self.RUNNING_IN_COMMAND_LINE_MODE}), (DEBUG_TRACING={self.DEBUG_TRACING}), (DRY_RUN={self.DRY_RUN}), (PLUGIN_ID={self.PLUGIN_ID}), (PLUGIN_TASK_NAME={self.PLUGIN_TASK_NAME}), (STASH_URL={self.STASH_URL}), (MAIN_SCRIPT_NAME={self.MAIN_SCRIPT_NAME})",
printTo, logLevel, lineNo)
def ExecuteProcess(self, args):
import platform, subprocess
is_windows = any(platform.win32_ver())
pid = None
self.Trace(f"is_windows={is_windows} args={args}")
if is_windows:
self.Trace("Executing process using Windows DETACHED_PROCESS")
DETACHED_PROCESS = 0x00000008
pid = subprocess.Popen(args,creationflags=DETACHED_PROCESS, shell=True).pid
else:
self.Trace("Executing process using normal Popen")
pid = subprocess.Popen(args).pid
self.Trace(f"pid={pid}")
return pid
def ExecutePythonScript(self, args):
PythonExe = f"{sys.executable}"
argsWithPython = [f"{PythonExe}"] + args
return self.ExecuteProcess(argsWithPython)
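# Example (illustrative): relaunch the calling plugin script detached, with tracing enabled:
#   stash.ExecutePythonScript([__file__, "--trace"])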
# Extends class StashInterface with functions which are not yet in the class
def metadata_autotag(self, paths:list=[], performers:list=[], studios:list=[], tags:list=[]):
query = """
mutation MetadataAutoTag($input:AutoTagMetadataInput!) {
metadataAutoTag(input: $input)
}
"""
metadata_autotag_input = {
"paths":paths,
"performers": performers,
"studios":studios,
"tags":tags,
}
result = self.call_GQL(query, {"input": metadata_autotag_input})
return result
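# Example (illustrative): auto-tag every configured Stash library path:
#   stash.metadata_autotag(paths=stash.STASH_PATHS)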
def backup_database(self):
return self.call_GQL("mutation { backupDatabase(input: {download: false})}")
def optimise_database(self):
return self.call_GQL("mutation OptimiseDatabase { optimiseDatabase }")
def metadata_clean_generated(self, blobFiles=True, dryRun=False, imageThumbnails=True, markers=True, screenshots=True, sprites=True, transcodes=True):
query = """
mutation MetadataCleanGenerated($input: CleanGeneratedInput!) {
metadataCleanGenerated(input: $input)
}
"""
clean_metadata_input = {
"blobFiles": blobFiles,
"dryRun": dryRun,
"imageThumbnails": imageThumbnails,
"markers": markers,
"screenshots": screenshots,
"sprites": sprites,
"transcodes": transcodes,
}
result = self.call_GQL(query, {"input": clean_metadata_input})
return result
def rename_generated_files(self):
return self.call_GQL("mutation MigrateHashNaming {migrateHashNaming}")
# def find_duplicate_scenes(self, distance: PhashDistance=PhashDistance.EXACT, fragment=None):
# query = """
# query FindDuplicateScenes($distance: Int) {
# findDuplicateScenes(distance: $distance) {
# ...SceneSlim
# }
# }
# """
# if fragment:
# query = re.sub(r'\.\.\.SceneSlim', fragment, query)
# else:
# query = """
# query FindDuplicateScenes($distance: Int) {
# findDuplicateScenes(distance: $distance)
# }
# """
# variables = {
# "distance": distance
# }
# result = self.call_GQL(query, variables)
# return result['findDuplicateScenes']