diff --git a/.github/ISSUE_TEMPLATE/bug-or-crash-report.md b/.github/ISSUE_TEMPLATE/bug-or-crash-report.md index 66ac50ca..6e1e643d 100644 --- a/.github/ISSUE_TEMPLATE/bug-or-crash-report.md +++ b/.github/ISSUE_TEMPLATE/bug-or-crash-report.md @@ -7,10 +7,11 @@ assignees: '' --- -**Describe the bug** +### Describe the bug -**To Reproduce** +### To Reproduce + Steps to reproduce the behavior: 1. Go to '...' @@ -18,27 +19,28 @@ Steps to reproduce the behavior: 3. Scroll down to '....' 4. See error -**Expected behavior** +### Expected behavior -**Screenshots and recordings** +### Screenshots and recordings -**Traceback or Crash Report** +### Traceback or Crash Report + If AutoSplit showed an exception traceback, please paste it here: -``` +```py ``` -**Version (please complete the following information):** +### Version (please complete the following information) - OS: [e.g. Windows 10.0.19045] - AutoSplit: [e.g. v2.0.0] -**AutoSplit Profile and Split Images** +### AutoSplit Profile and Split Images -**Additional context** +### Additional context diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 546da8b0..7826d5a1 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,15 +7,15 @@ assignees: '' --- -**Is your feature request related to a problem? Please describe.** +### Is your feature request related to a problem? Please describe -**Describe the solution you'd like** +### Describe the solution you'd like -**Describe alternatives you've considered** +### Describe alternatives you've considered -**Additional context** +### Additional context diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 85ffffdf..3cff918a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -13,10 +13,10 @@ name: "CodeQL" on: push: - branches: [main, master, develop, dev] + branches: [main, dev*] pull_request: # The branches below must be a subset of the branches above - branches: [develop, dev] + branches: [dev*] schedule: - cron: "26 13 * * 6" @@ -35,11 +35,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -50,7 +50,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -64,4 +64,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml index a5dcd70e..91d6cbd9 100644 --- a/.github/workflows/lint-and-build.yml +++ b/.github/workflows/lint-and-build.yml @@ -11,7 +11,6 @@ on: push: branches: - main - - master - dev* paths: - "**.py" @@ -21,7 +20,6 @@ on: pull_request: branches: - main - - master - dev* paths: - "**.py" @@ -40,59 +38,69 @@ concurrency: jobs: ruff: - runs-on: windows-latest - strategy: - fail-fast: false - # Ruff is version and platform sensible - matrix: - python-version: ["3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 steps: - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: "pip" - cache-dependency-path: "scripts/requirements*.txt" - - run: scripts/install.ps1 + uses: actions/checkout@v4 + - name: Get Ruff version + id: ruff_version + run: | + $Env:RUFF_VERSION=Select-String -path scripts/requirements-dev.txt -pattern 'ruff ?([=<>~]?= ?[\d\.]+)' | %{ $_.Matches[0].Groups[1].Value } + echo $Env:RUFF_VERSION + echo "RUFF_VERSION=$Env:RUFF_VERSION" >> $Env:GITHUB_OUTPUT shell: pwsh - - run: ruff check . + - uses: astral-sh/ruff-action@v3 + with: + version: ${{ steps.ruff_version.outputs.RUFF_VERSION }} + - run: ruff format --check Pyright: - runs-on: windows-latest + runs-on: ${{ matrix.os }} strategy: fail-fast: false # Pyright is version and platform sensible matrix: - python-version: ["3.10", "3.11", "3.12"] + os: [windows-latest, ubuntu-22.04] + python-version: ["3.11", "3.12", "3.13"] steps: - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: "pip" cache-dependency-path: "scripts/requirements*.txt" - run: scripts/install.ps1 shell: pwsh + - name: Get pyright version + id: pyright_version + run: | + PYRIGHT_VERSION=$(grep '$pyrightVersion = ' 'scripts/lint.ps1' | cut -d "#" -f 1 | cut -d = -f 2 | tr -d " '") + echo pyright version: "${PYRIGHT_VERSION}" + echo PYRIGHT_VERSION="${PYRIGHT_VERSION}" >> "${GITHUB_OUTPUT}" + shell: bash - name: Analysing the code with Pyright - uses: jakebailey/pyright-action@v1 + uses: jakebailey/pyright-action@v2 with: + version: ${{ steps.pyright_version.outputs.PYRIGHT_VERSION }} working-directory: src/ python-version: ${{ matrix.python-version }} Build: - runs-on: windows-latest + runs-on: ${{ matrix.os }} strategy: fail-fast: false # Only the Python version we plan on shipping matters. matrix: - python-version: ["3.11", "3.12"] + os: [windows-latest, ubuntu-22.04] + python-version: ["3.12", "3.13"] + include: + - os: ubuntu-22.04 + python-version: "3.11" # I had some Qt Wayland issues on 3.12 for ubuntu-22.04 iirc. 
TODO: test it steps: - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: "pip" @@ -102,15 +110,15 @@ jobs: - run: scripts/build.ps1 shell: pwsh - name: Upload Build Artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: AutoSplit (Python ${{ matrix.python-version }}) + name: AutoSplit for ${{ matrix.os }} (Python ${{ matrix.python-version }}) path: dist/AutoSplit* if-no-files-found: error - name: Upload Build logs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: Build logs (Python ${{ matrix.python-version }}) + name: Build logs for ${{ matrix.os }} (Python ${{ matrix.python-version }}) path: | build/AutoSplit/*.toc build/AutoSplit/*.txt diff --git a/.github/workflows/printenv.yml b/.github/workflows/printenv.yml index ed1adcbf..1b032639 100644 --- a/.github/workflows/printenv.yml +++ b/.github/workflows/printenv.yml @@ -10,7 +10,7 @@ on: type: boolean push: branches: - - master + - main - dev env: @@ -22,5 +22,5 @@ jobs: runs-on: windows-latest steps: - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 + uses: actions/checkout@v4 - run: printenv diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ffd3a619..db39db1e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - id: pretty-format-json exclude: ".vscode/.*" # Exclude jsonc @@ -9,31 +9,22 @@ repos: args: [--markdown-linebreak-ext=md] - id: end-of-file-fixer - id: mixed-line-ending - args: [--fix=crlf] + args: [--fix=lf] - id: check-case-conflict - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - rev: v2.11.0 + rev: v2.14.0 hooks: + - id: pretty-format-yaml + args: [--autofix, --indent, "2", --offset, "2", --preserve-quotes, --line-width, "100"] - id: pretty-format-ini args: [--autofix] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.1.7" # Must match requirements-dev.txt + rev: v0.8.5 # Must match requirements-dev.txt hooks: - id: ruff args: [--fix] - - repo: https://github.com/hhatto/autopep8 - rev: "v2.0.4" # Must match requirements-dev.txt - hooks: - - id: autopep8 - - repo: https://github.com/asottile/add-trailing-comma - rev: v3.1.0 # Must match requirements-dev.txt - hooks: - - id: add-trailing-comma + - id: ruff-format ci: autoupdate_branch: dev - autoupdate_schedule: monthly - skip: - # Ignore until Linux support. 
We don't want lf everywhere yet - # And crlf fails on CI because pre-commit runs on linux - - "mixed-line-ending" + autoupdate_schedule: quarterly diff --git a/.sonarcloud.properties b/.sonarcloud.properties deleted file mode 100644 index 56192197..00000000 --- a/.sonarcloud.properties +++ /dev/null @@ -1 +0,0 @@ -sonar.python.version=3.10, 3.11, 3.12 diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 790ee33f..ecd26dfc 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,11 +1,10 @@ // Keep in alphabetical order { "recommendations": [ + "charliermarsh.ruff", "davidanson.vscode-markdownlint", "eamodio.gitlens", - "emeraldwalk.runonsave", "github.vscode-github-actions", - "ms-python.autopep8", "ms-python.python", "ms-python.vscode-pylance", "ms-vscode.powershell", @@ -32,11 +31,11 @@ // Don't recommend to autoinstall // // // Use Ruff instead + "ms-python.autopep8", + "ms-python.black-formatter", "ms-python.flake8", "ms-python.isort", "ms-python.pylint", - // We use autopep8 - "ms-python.black-formatter", // This is a Git project "johnstoncode.svn-scm", // Prefer using VSCode itself as a text editor diff --git a/.vscode/launch.json b/.vscode/launch.json index 957b8d50..270f80e1 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -6,7 +6,7 @@ "configurations": [ { "name": "Python: AutoSplit (debug non-user code)", - "type": "python", + "type": "debugpy", "request": "launch", "preLaunchTask": "Compile resources", "program": "src/AutoSplit.py", @@ -15,7 +15,7 @@ }, { "name": "Python: AutoSplit", - "type": "python", + "type": "debugpy", "request": "launch", "preLaunchTask": "Compile resources", "program": "src/AutoSplit.py", @@ -24,7 +24,7 @@ }, { "name": "Python: AutoSplit --auto-controlled", - "type": "python", + "type": "debugpy", "request": "launch", "preLaunchTask": "Compile resources", "program": "src/AutoSplit.py", diff --git a/.vscode/settings.json b/.vscode/settings.json index d735a0ed..d3e7500a 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,7 @@ { "editor.rulers": [ 80, - 120 + 100 ], "[git-commit]": { "editor.rulers": [ @@ -25,14 +25,6 @@ // Let dedicated linter (Ruff) organize imports "source.organizeImports": "never" }, - "emeraldwalk.runonsave": { - "commands": [ - { - "match": "\\.pyi?", - "cmd": "add-trailing-comma ${file}" - }, - ] - }, "files.associations": { ".flake8": "properties", "*.qrc": "xml", @@ -62,16 +54,19 @@ "[json][jsonc]": { "editor.defaultFormatter": "vscode.json-language-features", }, + "[yaml]": { + "editor.defaultFormatter": "redhat.vscode-yaml" + }, + "yaml.format.printWidth": 100, "[python]": { - // Ruff as a formatter doesn't fully satisfy our needs yet: https://github.com/astral-sh/ruff/discussions/7310 - "editor.defaultFormatter": "ms-python.autopep8", + "editor.defaultFormatter": "charliermarsh.ruff", "editor.tabSize": 4, "editor.rulers": [ 72, // PEP8-17 docstrings // 79, // PEP8-17 default max // 88, // Black default // 99, // PEP8-17 acceptable max - 120, // Our hard rule + 100, // Our hard rule ], }, "mypy-type-checker.importStrategy": "fromEnvironment", @@ -87,6 +82,9 @@ ], "python.analysis.diagnosticMode": "workspace", "ruff.importStrategy": "fromEnvironment", + "ruff.enable": true, + "ruff.fixAll": true, + "ruff.organizeImports": true, // Use the Ruff extension instead "isort.check": false, "powershell.codeFormatting.pipelineIndentationStyle": "IncreaseIndentationForFirstPipeline", @@ -97,6 +95,8 @@ "powershell.codeFormatting.whitespaceBetweenParameters": true, 
"powershell.integratedConsole.showOnStartup": false, "terminal.integrated.defaultProfile.windows": "PowerShell", + "terminal.integrated.defaultProfile.linux": "pwsh", + "terminal.integrated.defaultProfile.osx": "pwsh", "xml.codeLens.enabled": true, "xml.format.spaceBeforeEmptyCloseTag": false, "xml.format.preserveSpace": [ diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 65ac2dc5..a38720e1 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -13,6 +13,28 @@ } } }, + "linux": { + "options": { + "shell": { + "executable": "pwsh", + "args": [ + "-NoProfile", + "-Command" + ] + } + } + }, + "osx": { + "options": { + "shell": { + "executable": "pwsh", + "args": [ + "-NoProfile", + "-Command" + ] + } + } + }, "tasks": [ { "label": "Compile resources", diff --git a/PyInstaller/hooks/hook-requests.py b/PyInstaller/hooks/hook-requests.py deleted file mode 100644 index 13de4b6b..00000000 --- a/PyInstaller/hooks/hook-requests.py +++ /dev/null @@ -1,4 +0,0 @@ -from PyInstaller.utils.hooks import collect_data_files - -# Get the cacert.pem -datas = collect_data_files("certifi") diff --git a/README.md b/README.md index f11092e0..d5e85359 100644 --- a/README.md +++ b/README.md @@ -2,205 +2,60 @@ # LiveSplit AutoSplit [![CodeQL](/../../actions/workflows/codeql-analysis.yml/badge.svg)](/../../actions/workflows/codeql-analysis.yml) [![Lint and build](/../../actions/workflows/lint-and-build.yml/badge.svg)](/../../actions/workflows/lint-and-build.yml) [![SemVer](https://badgen.net/badge/_/SemVer%20compliant/grey?label)](https://semver.org/) -[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) -[![autopep8](https://badgen.net/badge/code%20style/autopep8/blue)](https://github.com/hhatto/autopep8) +[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://docs.astral.sh/ruff/linter/) +[![Ruff format](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/format.json)](https://docs.astral.sh/ruff/formatter/) [![Checked with pyright](https://microsoft.github.io/pyright/img/pyright_badge.svg)](https://microsoft.github.io/pyright/) [![Checked with mypy](https://www.mypy-lang.org/static/mypy_badge.svg)](https://mypy-lang.org/) Easy to use image comparison based auto splitter for speedrunning on console or PC. -This program can be used to automatically start, split, and reset your preferred speedrun timer by comparing images to a capture region. This allows you to focus more on your speedrun and less on managing your timer. It also improves the accuracy of your splits. It can be used in tandem with any speedrun timer that accepts hotkeys (LiveSplit, wsplit, etc.), and can be integrated with LiveSplit. +This program can be used to automatically start, split, and reset your preferred speedrun timer by comparing images to a capture region. This allows you to focus more on your speedrun and less on managing your timer. It also improves the accuracy of your splits. It can be used in tandem with any speedrun timer that accepts hotkeys (LiveSplit, WSplit, etc.), and can be integrated with LiveSplit. -![Example](/docs/2.0.0_gif.gif) +

+![Example](/docs/2.2.2.gif)

-# TUTORIAL

-## DOWNLOAD AND OPEN

+## Tutorial

+To understand how to use AutoSplit and how it works in-depth, please read the [tutorial](/docs/tutorial.md).
+
+## Download and open

- Download the [latest version](/../../releases/latest)
- You can also check out the [latest dev builds](/../../actions/workflows/lint-and-build.yml?query=event%3Apush+is%3Asuccess) (requires a GitHub account) (If you don't have a GitHub account, you can try [nightly.link](https://nightly.link/Toufool/AutoSplit/workflows/lint-and-build/dev))
+- Linux users must ensure they are in the `tty` and `input` groups and have write access to `/dev/uinput`. You can run the following commands to do so:
+
+
+
+  ```shell
+  sudo usermod -a -G tty,input $USER
+  sudo touch /dev/uinput
+  sudo chmod +0666 /dev/uinput
+  echo 'KERNEL=="uinput", TAG+="uaccess"' | sudo tee /etc/udev/rules.d/50-uinput.rules
+  echo 'SUBSYSTEM=="input", MODE="0666" GROUP="plugdev"' | sudo tee /etc/udev/rules.d/12-input.rules
+  echo 'SUBSYSTEM=="misc", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules
+  echo 'SUBSYSTEM=="tty", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules
+  loginctl terminate-user $USER
+  ```
+
+
+  All screen capture methods are incompatible with Wayland. Follow [this guide](https://linuxconfig.org/how-to-enable-disable-wayland-on-ubuntu-22-04-desktop) to disable it.

### Compatibility

- Windows 10 and 11.
-- Python 3.10+ (Not required for normal use. Refer to the [build instructions](/docs/build%20instructions.md) if you'd like run the application directly in Python).
-
-## OPTIONS
-
-#### Split Image Folder
-
-- Supported image file types: PNG, JPEG, bitmaps, WebP, and [more](https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread).
-- Images can be any size and ratio.
-- Images are matched in alphanumerical order.
-- Recommended filenaming convention: `001_SplitName.png, 002_SplitName.png, 003_SplitName.png`...
-- Custom split image settings are handled in the filename. See how [here](#custom-split-image-settings).
-- To create split images, it is recommended to use AutoSplit's Take Screenshot button for accuracy. However, images can be created using any method including Print Screen and [Snipping Tool](https://support.microsoft.com/en-us/help/4027213/windows-10-open-snipping-tool-and-take-a-screenshot).
-
-#### Capture Region
-
-- This is the region that your split images are compared to. Usually, this is going to be the full game screen.
-- Click "Select Region".
-- Click and drag to form a rectangle over the region you want to capture.
-- Adjust the x, y, width, and height of the capture region manually to make adjustments as needed.
-- If you want to align your capture region by using a reference image, click "Align Region".
-- You can freely move the window that the program is capturing, but resizing the window will cause the capture region to change.
-- Once you are happy with your capture region, you may unselect Live Capture Region to decrease CPU usage if you wish.
-- You can save a screenshot of the capture region to your split image folder using the Take Screenshot button.
-
-#### Avg. FPS
-
-- Calculates the average comparison rate of the capture region to split images. This value will likely be much higher than needed, so it is highly recommended to limit your FPS depending on the frame rate of the game you are capturing.
- -### Settings - -#### Comparison Method - -- There are three comparison methods to choose from: L2 Norm, Histograms, and Perceptual Hash (or pHash). - - L2 Norm: This method should be fine to use for most cases. It finds the difference between each pixel, squares it, sums it over the entire image and takes the square root. This is very fast but is a problem if your image is high frequency. Any translational movement or rotation can cause similarity to be very different. - - Histograms: An explanation on Histograms comparison can be found [here](https://mpatacchiola.github.io/blog/2016/11/12/the-simplest-classifier-histogram-intersection.html). This is a great method to use if you are using several masked images. - > This algorithm is particular reliable when the colour is a strong predictor of the object identity. The histogram intersection [...] is robust to occluding objects in the foreground. - - Perceptual Hash: An explanation on pHash comparison can be found [here](http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html). It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate. - -#### Capture Method - - -- **Windows Graphics Capture** (fast, most compatible, capped at 60fps) - Only available in Windows 10.0.17134 and up. - Due to current technical limitations, Windows versions below 10.0.0.17763 require having at least one audio or video Capture Device connected and enabled. - Allows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. - Adds a yellow border on Windows 10 (not on Windows 11). - Caps at around 60 FPS. -- **BitBlt** (fastest, least compatible) - The best option when compatible. But it cannot properly record OpenGL, Hardware Accelerated or Exclusive Fullscreen windows. - The smaller the selected region, the more efficient it is. -- **Direct3D Desktop Duplication** (slower, bound to display) - Duplicates the desktop using Direct3D. - It can record OpenGL and Hardware Accelerated windows. - About 10-15x slower than BitBlt. Not affected by window size. - Overlapping windows will show up and can't record across displays. - This option may not be available for hybrid GPU laptops, see [D3DDD-Note-Laptops.md](/docs/D3DDD-Note-Laptops.md) for a solution. -- **Force Full Content Rendering** (very slow, can affect rendering) - Uses BitBlt behind the scene, but passes a special flag to PrintWindow to force rendering the entire desktop. - About 10-15x slower than BitBlt based on original window size and can mess up some applications' rendering pipelines. -- **Video Capture Device** - Uses a Video Capture Device, like a webcam, virtual cam, or capture card. - -#### Capture Device - -Select the Video Capture Device that you wanna use if selecting the `Video Capture Device` Capture Method. - - -#### Show Live Similarity - -- Displays the live similarity between the capture region and the current split image. This number is between 0 and 1, with 1 being a perfect match. - -#### Show Highest Similarity - -- Shows the highest similarity between the capture region and current split image. - -#### Current Similarity Threshold - -- When the live similarity goes above this value, the program hits your split hotkey and moves to the next split image. - -#### Default Similarity Threshold - -- This value will be set as the threshold for an image if there is no custom threshold set for that image. 
- -#### Default Delay Time - -- Time in milliseconds that the program waits before hitting the split hotkey for that specific split if there is no custom Delay Time set for that image. - -#### Default Pause Time - -- Time in seconds that the program stops comparison after a split if there is no custom Pause Time set for that image. Useful for if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage. - -#### Dummy splits when undoing / skipping - -AutoSplit will group dummy splits together with a real split when undoing/skipping. This basically allows you to tie one or more dummy splits to a real split to keep it as in sync as possible with the real splits in LiveSplit/wsplit. If they are out of sync, you can always use "Previous Image" and "Next Image". - -Examples: -Given these splits: 1 dummy, 2 normal, 3 dummy, 4 dummy, 5 normal, 6 normal. - -In this situation you would have only 3 splits in LiveSplit/wsplit (even though there are 6 split images, only 3 are "real" splits). This basically results in 3 groups of splits: 1st split is images 1 and 2. 2nd split is images 3, 4 and 5. 3rd split is image 6. - -- If you are in the 1st or 2nd image and press the skip key, it will end up on the 3rd image -- If you are in the 3rd, 4th or 5th image and press the undo key, it will end up on the 2nd image -- If you are in the 3rd, 4th or 5th image and press the skip key, it will end up on the 6th image -- If you are in the 6th image and press the undo key, it will end up on the 5th image - -#### Loop last Split Image to first Split Image - -If this option is enabled, when the last split meets the threshold and splits, AutoSplit will loop back to the first split image and continue comparisons. -If this option is disabled, when the last split meets the threshold and splits, AutoSplit will stop running comparisons. -This option does not loop single, specific images. See the Custom Split Image Settings section above for this feature. - -#### Start also Resets - -If this option is enabled, a "Start" command (ie: from the Start Image) will also send the "Reset" command. This is useful if you want to automatically restart your timer using the Start Image. Since AutoSplit won't be running and won't be checking for the Reset Image. - -Having the reset image check be active at all time would be a better, more organic solution in the future. But that is dependent on migrating to an observer pattern () and being able to reload all images. - -#### Enable auto Reset Image - -This option is mainly meant to be toggled with the `Toggle auto Reset Image` hotkey. You can enable it to temporarily disable the Reset Image if you make a mistake in your run that would cause the Reset Image to trigger. Like exiting back to the game's menu (aka Save&Quit). - -### Custom Split Image Settings - -- Each split image can have different thresholds, pause times, delay split times, loop amounts, and can be flagged. -- These settings are handled in the image's filename. -- **Custom thresholds** are place between parenthesis `()` in the filename. This value will override the default threshold. -- **Custom pause times** are placed between square brackets `[]` in the filename. This value will override the default pause time. -- **Custom delay times** are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays. 
-- A different **comparison method** can be specified with their 0-base index between carets `^^`: - - `^0^`: L2 Norm - - `^1^`: Histogram - - `^2^`: Perceptual Hash -- **Image loop** amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image. -- **Flags** are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags: - - `{d}` **dummy split image**. When matched, it moves to the next image without hitting your split hotkey. - - `{b}` split when **similarity goes below** the threshold rather than above. When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold. - - `{p}` **pause flag**. When a split image filename has this flag, it will hit your pause hotkey rather than your split hokey. -- Filename examples: - - `001_SplitName_(0.9)_[10].png` is a split image with a threshold of 0.9 and a pause time of 10 seconds. - - `002_SplitName_(0.9)_[10]_{d}.png` is the second split image with a threshold of 0.9, pause time of 10, and is a dummy split. - - `003_SplitName_(0.85)_[20]_#3500#.png` is the third split image with a threshold of 0.85, pause time of 20 and has a delay split time of 3.5 seconds. - - `004_SplitName_(0.9)_[10]_#3500#_@3@_{b}.png` is the fourth split image with a threshold of 0.9, pause time of 10 seconds, delay split time of 3.5 seconds, will loop 3 times, and will split when similarity is below the threshold rather than above. - -## Special images - -### How to Create a Masked Image - -Masked images are very useful if only a certain part of the capture region is consistent (for example, consistent text on the screen, but the background is always different). Histogram or L2 norm comparison is recommended if you use any masked images. It is highly recommended that you do NOT use pHash comparison if you use any masked images, or it'll be very inaccurate. - -The best way to create a masked image is to set your capture region as the entire game screen, take a screenshot, and use a program like [paint.net](https://www.getpaint.net/) to "erase" (make transparent) everything you don't want the program to compare. More on creating images with transparency using paint.net can be found in [this tutorial](https://www.youtube.com/watch?v=v53kkUYFVn8). For visualization, here is what the capture region compared to a masked split image looks like if you would want to split on "Shine Get!" text in Super Mario Sunshine: - -![Mask Example](/docs/mask_example_image.png) - -### Reset Image - -You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a threshold and pause time for it. The pause time is the amount of seconds AutoSplit will wait before checking for the Reset Image once the run starts. For example: `Reset_(0.95)_[10].png`. - -### Start Image - -The Start Image is similar to the Reset Image. You can only have one Start Image with the keyword `start_auto_splitter`.You can reload the image using the "`Reload Start Image`" button. The pause time is the amount of seconds AutoSplit will wait before starting comparisons of the first split image. 
Delay times will be used to delay starting your timer after the threshold is met.

-### Profiles

-

-- Profiles use the extension `.toml`. Profiles can be saved and loaded by using `File -> Save Profile As...` and `File -> Load Profile`.
-- The profile contains all of your settings, including information about the capture region.
-- You can save multiple profiles, which is useful if you speedrun multiple games.
-- If you change your display setup (like using a new monitor, or upgrading to Windows 11), you may need to readjust or reselect your Capture Region.
+- Linux (still in early development)
+  - Should work on Ubuntu 20.04+ (Only tested on Ubuntu 22.04)
+  - Wayland is not currently supported
+  - WSL2/WSLg requires an additional Desktop Environment, external X11 server, and/or systemd
+- Python 3.11+ (Not required for normal use. Refer to the [build instructions](/docs/build%20instructions.md) if you'd like to run the application directly in Python).

## Timer Integration

### Timer Global Hotkeys

-- Click "Set Hotkey" on each hotkey to set the hotkeys to AutoSplit. The Start / Split hotkey and Pause hotkey must be the same as the one used in your preferred timer program in order for the splitting/pausing to work properly.
-- Make sure that Global Hotkeys are enabled in your speedrun timer.
-- All of these actions can also be handled by their corresponding buttons.
-- Note that pressing your Pause Hotkey does not serve any function in AutoSplit itself and is strictly used for the Pause flag.
+Out of the box, AutoSplit works by listening for keyboard events and sending virtual keystrokes. This makes AutoSplit compatible with any timer by configuring your hotkeys to be the same. See the [Timer Global Hotkeys Tutorial](/docs/tutorial.md#timer-global-hotkeys).

### LiveSplit Integration

@@ -209,20 +64,13 @@ The AutoSplit LiveSplit Component will directly connect AutoSplit with LiveSplit

- Use hotkeys directly from LiveSplit to control AutoSplit and LiveSplit together
- Load AutoSplit and any AutoSplit profile automatically when opening a LiveSplit layout.

-#### LiveSplit Integration Tutorial
-
-- Click [here](https://github.com/Toufool/LiveSplit.AutoSplitIntegration/raw/main/update/Components/LiveSplit.AutoSplitIntegration.dll) to download the latest component.
-- Place the .dll file into your `[...]\LiveSplit\Components` folder.
-- Open LiveSplit -> Right Click -> Edit Layout -> Plus Button -> Control -> AutoSplit Integration.
-- Click Layout Settings -> AutoSplit Integration
-- Click the Browse buttons to locate your AutoSplit Path (path to AutoSplit executable) and Profile Path (path to your AutoSplit `.toml` profile file) respectively.
-  - If you have not yet set saved a profile, you can do so using AutoSplit, and then go back and set your Settings Path.
-- Once set, click OK, and then OK again to close the Layout Editor. Right click LiveSplit -> Save Layout to save your layout. AutoSplit and your selected profile will now open automatically when opening that LiveSplit Layout `.lsl` file.
+See the [installation instructions](https://github.com/Toufool/LiveSplit.AutoSplitIntegration#installation).

## Known Limitations

- For many games, it will be difficult to find a split image for the last split of the run.
- The window of the capture region cannot be minimized.
+- Linux support is incomplete and we're [looking for contributors](../../issues?q=is%3Aissue+is%3Aopen+label%3A"help+wanted"+label%3ALinux+).
## Resources

@@ -240,23 +88,36 @@ See [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for our contributing standards.

Refer to the [build instructions](/docs/build%20instructions.md) if you're interested in building the application yourself or running it in Python.

Not a developer? You can still help through the following methods:
-
+
- Donating (see link below)
-- [Upvoting feature requests](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3Aenhancement) you are interested in
+- [Upvoting 👍 feature requests](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3Aenhancement) you are interested in
- Sharing AutoSplit with other speedrunners
-- Upvoting the following upstream issues in libraries and tools we use:
+- Upvoting 👍 the following upstream issues in libraries and tools we use:
  - - - - - + - + - + - + - + - + - + - - - - + - + - + - + - - - + - + - + - + - - - - - - + -

## Credits

diff --git a/docs/2.0.0_gif.gif b/docs/2.0.0_gif.gif deleted file mode 100644 index b06242dc..00000000 Binary files a/docs/2.0.0_gif.gif and /dev/null differ
diff --git a/docs/2.2.2.gif b/docs/2.2.2.gif new file mode 100644 index 00000000..0cc67d7e Binary files /dev/null and b/docs/2.2.2.gif differ
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 41f66bbe..7f22d1e8 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -1,4 +1,4 @@
-
+

# Contributing guidelines

@@ -8,8 +8,9 @@ Refer to the [build instructions](/docs/build%20instructions.md) if you're inter

## Linting and formatting

-The project is setup to automatically configure VSCode witht he proper extensions and settings. Linters and formatters will be run on save.
+The project is set up to automatically configure VSCode with the proper extensions and settings. Fixers and formatters will be run on save.
If you use a different IDE or for some reason cannot / don't want to use the recommended extensions, you can run `scripts/lint.ps1`.
+Project configurations for other IDEs are welcome.

If you like to use pre-commit hooks, `.pre-commit-config.yaml` is setup for such uses.

@@ -28,6 +29,16 @@ Your Pull Request has to pass all checks ot be accepted. If it is still a work-i

Most coding standards will be enforced by automated tooling. As time goes on, project-specific standards and "gotchas" in the frameworks we use will be listed here.

+### Keep shipped dependencies and bundle size low
+
+The bigger the bundle, the longer it takes to boot single-file executables. That is because we need to ship everything and the bootloader basically has to extract it all.
+Our main use case is a single file that is as easy to use as possible for the end user.
+Keeping install time, build time and bandwidth as low as possible is also a nice-to-have.
+
+You should also consider whether the work the dependency is doing is simple enough that you could implement it yourself.
+
+For these reasons, it's important to consider the impacts of adding any new dependency bundled with AutoSplit.
+
### Magic numbers

Please avoid using magic numbers and prefer constants and enums that have a meaningful name when possible.
@@ -38,6 +49,10 @@ For image shape and channels, please use `utils.ImageShape` and `utils.ColorChan

To avoid image shape mismatch issues, and to keep code simpler, we standardize the image color format to BGRA. This should always be done early in the pipeline, so whatever functionality takes care of obtaining an image should also ensure its color format.
You can do so with `cv2.cvtColor` (ie: `cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)` or `cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)`).

+### Split-specific setting overrides
+
+Whenever a split image overrides a default global setting, we add a getter that handles the logic of checking for a split-specific override, then falling back to globals. This avoids repeating the fallback logic in multiple places. See `AutoSplitImage.get_*` methods for examples.
+
## Testing

-None 😦 Please help us create test suites, we lack the time, but we really want (need!) them.
+None 😦 Please help us create test suites, we lack the time, but we really want (*need!*) them.
diff --git a/docs/build instructions.md b/docs/build instructions.md
index 3d700c87..0a157923 100644
--- a/docs/build instructions.md
+++ b/docs/build instructions.md
@@ -6,12 +6,18 @@

- Microsoft Visual C++ 14.0 or greater may be required to build the executable. Get it with [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/).

+### Linux
+
+- You need to be part of the `input` and `tty` groups, as well as have permissions on a few files and folders.
+  If you are missing from either group, the install script will take care of it on its first run, but you'll need to restart your session.
+
### All platforms

-- [Python](https://www.python.org/downloads/) 3.10+.
+- [Python](https://www.python.org/downloads/) 3.11+.
- [Node](https://nodejs.org) is optional, but required for complete linting.
  - Alternatively you can install the [pyright python wrapper](https://pypi.org/project/pyright/) which has a bit of an overhead delay.
-- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell)
+- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) is used to run all the scripts.
+  - This is needed even for Windows, as the bundled PowerShell 5.1 is too old.
- [VSCode](https://code.visualstudio.com/Download) is not required, but highly recommended.
  - Everything already configured in the workspace, including Run (F5) and Build (Ctrl+Shift+B) commands, default shell, and recommended extensions.
- [PyCharm](https://www.jetbrains.com/pycharm/) is also a good Python IDE, but nothing is configured. If you are a PyCharm user, feel free to open a PR with all necessary workspace configurations!
@@ -26,7 +32,7 @@
  - `python3 -m venv .venv`
  - `source .venv/bin/activate`
- Run `./scripts/install.ps1` to install all dependencies.
-  - If you're having issues with the PySide generated code, you might want to first run `pip uninstall -y shiboken6 PySide PySide-Essentials`
+  - If you're having issues with the PySide generated code, you might want to first run `pip uninstall -y shiboken6 PySide6 PySide6-Essentials`
- Run the app directly with `./scripts/start.ps1 [--auto-controlled]`.
  - Or debug by pressing `F5` in VSCode.
  - The `--auto-controlled` flag is passed when AutoSplit is started by LiveSplit.
diff --git a/docs/tutorial.md b/docs/tutorial.md
new file mode 100644
index 00000000..b9040ddf
--- /dev/null
+++ b/docs/tutorial.md
@@ -0,0 +1,266 @@
+# TUTORIAL
+
+## OPTIONS
+
+#### Split Image Folder
+
+- Supported image file types: PNG, JPEG, bitmaps, WebP, and [more](https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread).
+- Images can be any size and ratio.
+- Images are matched in alphanumerical order.
+- Recommended filenaming convention: `001_SplitName.png, 002_SplitName.png, 003_SplitName.png`...
+- Custom split image settings are handled in the filename. See how [here](#custom-split-image-settings). +- To create split images, it is recommended to use AutoSplit's Take Screenshot button for accuracy. However, images can be created using any method including Print Screen and [Snipping Tool](https://support.microsoft.com/en-us/help/4027213/windows-10-open-snipping-tool-and-take-a-screenshot). + +#### Capture Region + +- This is the region that your split images are compared to. Usually, this is going to be the full game screen. +- Click "Select Region". +- Click and drag to form a rectangle over the region you want to capture. +- Adjust the x, y, width, and height of the capture region manually to make adjustments as needed. +- If you want to align your capture region by using a reference image, click "Align Region". +- You can freely move the window that the program is capturing, but resizing the window will cause the capture region to change. +- Once you are happy with your capture region, you may unselect Live Capture Region to decrease CPU usage if you wish. +- You can save a screenshot of the capture region to your split image folder using the Take Screenshot button. + +#### Avg. FPS + +- Calculates the average comparison rate of the capture region to split images. This value will likely be much higher than needed, so it is highly recommended to limit your FPS depending on the frame rate of the game you are capturing. + +### Settings + +#### Comparison Method + +- There are three comparison methods to choose from: L2 Norm, Histograms, and Perceptual Hash (or pHash). + - L2 Norm: This method should be fine to use for most cases. It finds the difference between each pixel, squares it, sums it over the entire image and takes the square root. This is very fast but is a problem if your image is high frequency. Any translational movement or rotation can cause similarity to be very different. + - Histograms: An explanation on Histograms comparison can be found [here](https://mpatacchiola.github.io/blog/2016/11/12/the-simplest-classifier-histogram-intersection.html). This is a great method to use if you are using several masked images. + > This algorithm is particular reliable when the colour is a strong predictor of the object identity. The histogram intersection [...] is robust to occluding objects in the foreground. + - Perceptual Hash: An explanation on pHash comparison can be found [here](http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html). It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate. + +#### Capture Method + + +##### Windows + +- **Windows Graphics Capture** (fast, most compatible, capped at 60fps) + Only available in Windows 10.0.17134 and up. + Allows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. + Adds a yellow border on Windows 10 (not on Windows 11). + Caps at around 60 FPS. +- **BitBlt** (fastest, least compatible) + The best option when compatible. But it cannot properly record OpenGL, Hardware Accelerated or Exclusive Fullscreen windows. + The smaller the selected region, the more efficient it is. +- **Direct3D Desktop Duplication** (slower, bound to display) + Duplicates the desktop using Direct3D. + It can record OpenGL and Hardware Accelerated windows. + Up to 15x slower than BitBlt for tiny regions. Not affected by window size. + Limited by the target window and monitor's refresh rate. + Overlapping windows will show up and can't record across displays. 
+  This option may not be available for hybrid GPU laptops, see [D3DDD-Note-Laptops.md](/docs/D3DDD-Note-Laptops.md) for a solution.
+- **Force Full Content Rendering** (very slow, can affect rendering)
+  Uses BitBlt behind the scenes, but passes a special flag to PrintWindow to force rendering the entire desktop.
+  About 10-15x slower than BitBlt based on original window size and can mess up some applications' rendering pipelines.
+
+##### Linux
+
+- **X11 XCB** (fast, requires XCB)
+  Uses the XCB library to take screenshots of the X11 server.
+- **Scrot** (very slow, may leave files)
+  Uses Scrot (SCReenshOT) to take screenshots.
+  Leaves behind a screenshot file if interrupted.
+
+  "scrot" must be installed: `sudo apt-get install scrot`
+
+##### All platforms
+
+- **Video Capture Device**
+  Uses a Video Capture Device, like a webcam, virtual cam, or capture card.
+
+#### Capture Device
+
+Select the Video Capture Device that you want to use if selecting the `Video Capture Device` Capture Method.
+
+
+#### Show Live Similarity
+
+- Displays the live similarity between the capture region and the current split image. This number is between 0 and 1, with 1 being a perfect match.
+
+#### Show Highest Similarity
+
+- Shows the highest similarity between the capture region and current split image.
+
+#### Current Similarity Threshold
+
+- When the live similarity goes above this value, the program hits your split hotkey and moves to the next split image.
+
+#### Default Similarity Threshold
+
+- This value will be set as the threshold for an image if there is no custom threshold set for that image.
+
+#### Default Delay Time
+
+- Time in milliseconds that the program waits before hitting the split hotkey for that specific split if there is no custom Delay Time set for that image.
+
+#### Default Pause Time
+
+- Time in seconds that the program stops comparison after a split if there is no custom Pause Time set for that image. Useful if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage.
+
+#### Dummy splits when undoing / skipping
+
+AutoSplit will group dummy splits together with a real split when undoing/skipping. This basically allows you to tie one or more dummy splits to a real split to keep it as in sync as possible with the real splits in LiveSplit/WSplit. If they are out of sync, you can always use "Previous Image" and "Next Image".
+
+Examples:
+Given these splits: 1 dummy, 2 normal, 3 dummy, 4 dummy, 5 normal, 6 normal.
+
+In this situation you would have only 3 splits in LiveSplit/WSplit (even though there are 6 split images, only 3 are "real" splits). This basically results in 3 groups of splits: 1st split is images 1 and 2. 2nd split is images 3, 4 and 5. 3rd split is image 6.
+
+- If you are in the 1st or 2nd image and press the skip key, it will end up on the 3rd image
+- If you are in the 3rd, 4th or 5th image and press the undo key, it will end up on the 2nd image
+- If you are in the 3rd, 4th or 5th image and press the skip key, it will end up on the 6th image
+- If you are in the 6th image and press the undo key, it will end up on the 5th image
+
+#### Loop last Split Image to first Split Image
+
+If this option is enabled, when the last split meets the threshold and splits, AutoSplit will loop back to the first split image and continue comparisons.
+If this option is disabled, when the last split meets the threshold and splits, AutoSplit will stop running comparisons.
+This option does not loop single, specific images. See the Custom Split Image Settings section above for this feature.
+
+#### Start also Resets
+
+If this option is enabled, a "Start" command (ie: from the Start Image) will also send the "Reset" command. This is useful if you want to automatically restart your timer using the Start Image, since AutoSplit won't be running and won't be checking for the Reset Image.
+
+Having the reset image check be active at all times would be a better, more organic solution in the future. But that is dependent on migrating to an observer pattern and being able to reload all images.
+
+#### Enable auto Reset Image
+
+This option is mainly meant to be toggled with the `Toggle auto Reset Image` hotkey. You can enable it to temporarily disable the Reset Image if you make a mistake in your run that would cause the Reset Image to trigger, like exiting back to the game's menu (aka Save&Quit).
+
+### Custom Split Image Settings
+
+- Each split image can have different thresholds, pause times, delay split times, loop amounts, and can be flagged.
+- These settings are handled in the image's filename.
+- **Custom thresholds** are placed between parentheses `()` in the filename. This value will override the default threshold.
+- **Custom pause times** are placed between square brackets `[]` in the filename. This value will override the default pause time.
+- **Custom delay times** are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays.
+- A different **comparison method** can be specified with its 0-based index between carets `^^`:
+  - `^0^`: L2 Norm
+  - `^1^`: Histogram
+  - `^2^`: Perceptual Hash
+- **Image loop** amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image.
+- **Flags** are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags:
+  - `{d}` **dummy split image**. When matched, it moves to the next image without hitting your split hotkey.
+  - `{b}` split when **similarity goes below** the threshold rather than above. When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold.
+  - `{p}` **pause flag**. When a split image filename has this flag, it will hit your pause hotkey rather than your split hotkey.
+- Filename examples:
+  - `001_SplitName_(0.9)_[10].png` is a split image with a threshold of 0.9 and a pause time of 10 seconds.
+  - `002_SplitName_(0.9)_[10]_{d}.png` is the second split image with a threshold of 0.9, pause time of 10, and is a dummy split.
+  - `003_SplitName_(0.85)_[20]_#3500#.png` is the third split image with a threshold of 0.85, pause time of 20 and has a delay split time of 3.5 seconds.
+  - `004_SplitName_(0.9)_[10]_#3500#_@3@_{b}.png` is the fourth split image with a threshold of 0.9, pause time of 10 seconds, delay split time of 3.5 seconds, will loop 3 times, and will split when similarity is below the threshold rather than above.
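+
+As a rough illustration, here is a minimal sketch of how these filename tokens could be extracted. This is hypothetical example code, not AutoSplit's actual implementation (see `split_parser.py` in the source for the real one):
+
+```python
+import re
+
+
+def parse_split_filename(filename: str) -> dict:
+    """Illustrative only: extract the custom setting tokens from a split image filename."""
+    threshold = re.search(r"\((\d*\.?\d+)\)", filename)  # (0.9)  -> custom similarity threshold
+    pause = re.search(r"\[(\d+)\]", filename)  # [10]   -> custom pause time, in seconds
+    delay = re.search(r"#(\d+)#", filename)  # #3500# -> custom delay time, in milliseconds
+    loops = re.search(r"@(\d+)@", filename)  # @3@    -> image loop amount
+    method = re.search(r"\^(\d)\^", filename)  # ^1^    -> comparison method, 0-based index
+    flags = re.search(r"\{(\w+)\}", filename)  # {db}   -> one or more flag letters
+    return {
+        "threshold": float(threshold[1]) if threshold else None,
+        "pause_time": int(pause[1]) if pause else None,
+        "delay_time_ms": int(delay[1]) if delay else None,
+        "loops": int(loops[1]) if loops else 1,
+        "comparison_method": int(method[1]) if method else None,
+        "flags": set(flags[1]) if flags else set(),
+    }
+
+
+parse_split_filename("004_SplitName_(0.9)_[10]_#3500#_@3@_{b}.png")
+# -> threshold 0.9, pause 10 s, delay 3500 ms, 3 loops, flags {"b"}
+```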
+
+## Special images
+
+### How to Create a Masked Image
+
+Masked images are very useful if only a certain part of the capture region is consistent (for example, consistent text on the screen, but the background is always different). Histogram or L2 norm comparison is recommended if you use any masked images. It is highly recommended that you do NOT use pHash comparison if you use any masked images, or it'll be very inaccurate.
+
+The best way to create a masked image is to set your capture region as the entire game screen, take a screenshot, and use a program like [paint.net](https://www.getpaint.net/) to "erase" (make transparent) everything you don't want the program to compare. More on creating images with transparency using paint.net can be found in [this tutorial](https://www.youtube.com/watch?v=v53kkUYFVn8). For visualization, here is what the capture region compared to a masked split image looks like if you would want to split on "Shine Get!" text in Super Mario Sunshine:
+
+![Mask Example](/docs/mask_example_image.png)
+
+### Reset Image
+
+You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a threshold and pause time for it. The pause time is the number of seconds AutoSplit will wait before checking for the Reset Image once the run starts. For example: `Reset_(0.95)_[10].png`.
+
+### Start Image
+
+The Start Image is similar to the Reset Image. You can only have one Start Image with the keyword `start_auto_splitter`. You can reload the image using the "`Reload Start Image`" button. The pause time is the number of seconds AutoSplit will wait before starting comparisons of the first split image. Delay times will be used to delay starting your timer after the threshold is met.
+
+### Text Recognition / Optical Character Recognition (OCR) ⚠️EXPERIMENTAL⚠️
+
+You can use text recognition as an alternative comparison method.
+
+#### Tesseract install
+
+First you need to install tesseract and include it in your system or user environment variables.
+
+- See for installation instructions on all platforms.
+- For Windows:
+  1. You can go directly to to find the installer.
+  2. If you change the "Destination Folder" during install, then you'll also need to add it to your `PATH` environment variable.
+
+#### Usage
+
+To use this feature you need to place a text file (`.txt`) in your splits folder instead of an image file.
+
+An example file name and content could look like this:
+
+Filename: `001_start_auto_splitter.txt`
+
+Content:
+
+```toml
+texts = ["complete any 2 encounters"]
+left = 275
+right = 540
+top = 70
+bottom = 95
+methods = [0]
+fps_limit = 1
+```
+
+The `texts` field is an array and can take more than one text to look for:
+
+```toml
+texts = ["look for me", "or this text"]
+```
+
+Note: for now we only use lowercase letters in the comparison. All uppercase letters are converted to lowercase before the comparison.
+
+The rectangle coordinates where the text you are looking for is expected to appear in the image are configured as follows:
+
+```toml
+left = 275
+right = 540
+top = 70
+bottom = 95
+```
+
+If you're used to working in corner coordinates, you can think of `top_left = [left, top]` and `bottom_right = [right, bottom]`.
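+
+As a rough illustration (assuming an OpenCV/NumPy image, which is what AutoSplit compares internally), the rectangle maps directly onto array slicing — `frame` and its shape below are hypothetical stand-ins for a real capture:
+
+```python
+import numpy as np
+
+left, right, top, bottom = 275, 540, 70, 95
+frame = np.zeros((480, 640, 4), dtype=np.uint8)  # stand-in for a captured BGRA frame
+ocr_region = frame[top:bottom, left:right]  # NumPy images are indexed as frame[y, x]
+print(ocr_region.shape)  # (25, 265, 4): 25 px tall, 265 px wide, 4 channels
+```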
+
+Currently there are two comparison methods:
+
+- `0` - uses the Levenshtein distance (the default)
+- `1` - checks if the OCR text contains the searched text (results in matches of either `0.0` or `1.0`)
+
+If you only want a perfect full match, use "Levenshtein" with a threshold of `(1.0)` in your file name.
+
+You can also chain multiple comparison methods using the array notation:
+
+```toml
+methods = [1, 0]
+```
+
+The methods are then checked in the order you defined, and the best match among them wins.
+
+Note: This method can cause high CPU usage at the standard comparison FPS. You should therefore limit the comparison FPS when you use this method to 1 or 2 FPS using the `fps_limit` option.
+The size of the selected rectangle can also impact the CPU load (bigger = more CPU load).
+
+### Profiles
+
+
+- Profiles use the extension `.toml`. Profiles can be saved and loaded by using `File -> Save Profile As...` and `File -> Load Profile`.
+- The profile contains all of your settings, including information about the capture region.
+- You can save multiple profiles, which is useful if you speedrun multiple games.
+- If you change your display setup (like using a new monitor, or upgrading to Windows 11), you may need to readjust or reselect your Capture Region.
+
+## Timer Integration Tutorial
+
+### Timer Global Hotkeys
+
+- Click "Set Hotkey" on each hotkey to set the hotkeys to AutoSplit. The Start / Split hotkey and Pause hotkey must be the same as the one used in your preferred timer program in order for the splitting/pausing to work properly.
+- Make sure that Global Hotkeys are enabled in your speedrun timer.
+- All of these actions can also be handled by their corresponding buttons.
+- Note that pressing your Pause Hotkey does not serve any function in AutoSplit itself and is strictly used for the Pause flag.
+
+#### LiveSplit Integration
+
+See the [usage instructions](https://github.com/Toufool/LiveSplit.AutoSplitIntegration#openingclosing-autosplit).
diff --git a/mypy.ini b/mypy.ini
index 7f45e93d..7a8a4815 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,6 +1,7 @@
; We don't run mypy in the CI. This is just to help anyone who would like to use it manually.
; Namely, the mypy_primer tool.
[mypy]
+python_version = 3.11
show_column_numbers = true
mypy_path = $MYPY_CONFIG_FILE_DIR/typings
implicit_reexport = true
@@ -12,6 +13,9 @@ disallow_untyped_calls = false
disallow_untyped_defs = false
disallow_incomplete_defs = false
disable_error_code = return
+# Note: mypy still has issues with some boolean inferred returns like `is_valid_hwnd`
+# https://github.com/python/mypy/issues/4409
+# https://github.com/python/mypy/issues/10149

; exclude mypyc build
exclude = .*(build)/.*

@@ -19,8 +23,3 @@ exclude = .*(build)/.*
; Auto-generated code, not much we can do there
[mypy-gen.*]
disable_error_code = attr-defined, arg-type
-
-; Of course my stubs are going to be incomplete. Otherwise they'd be on typeshed!
-; Mypy becomes really whack with its errors inside these stubs though -[mypy-cv2.*] -disable_error_code = misc, name-defined, override diff --git a/pyproject.toml b/pyproject.toml index 48b0fbc7..a54b50f0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,150 +1,7 @@ -# https://docs.astral.sh/ruff/configuration/ -[tool.ruff] -target-version = "py310" -line-length = 120 -select = ["ALL"] -preview = true -# https://docs.astral.sh/ruff/rules/ -ignore = [ - ### - # Not needed or wanted - ### - "D1", # pydocstyle Missing doctring - "D401", # pydocstyle: non-imperative-mood - "EM", # flake8-errmsg - "FBT", # flake8-boolean-trap - "INP", # flake8-no-pep420 - "ISC003", # flake8-implicit-str-concat: explicit-string-concatenation - # Short messages are still considered "long" messages - "TRY003", # tryceratops : raise-vanilla-args - # Don't remove commented code, also too inconsistant - "ERA001", # eradicate: commented-out-code - # contextlib.suppress is roughly 3x slower than try/except - "SIM105", # flake8-simplify: use-contextlib-suppress - # Negative performance impact - "UP038", # non-pep604-isinstance - # Checked by type-checker (pyright) - "ANN", # flake-annotations - "PGH003", # blanket-type-ignore - "TCH", # flake8-type-checking - # Already shown by Pylance, checked by pyright, and can be caused by overloads. - "ARG002", # Unused method argument - # We want D213: multi-line-summary-second-line and D211: no-blank-line-before-class - "D203", # pydocstyle: one-blank-line-before-class - "D212", # pydocstyle: multi-line-summary-first-line - # Allow differentiating between broken (FIXME) and to be done/added/completed (TODO) - "TD001", # flake8-todos: invalid-todo-tag - - ### - # These should be warnings (https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774) - ### - "FIX", # flake8-fixme - # Not all TODOs are worth an issue, this would be better as a warning - "TD003", # flake8-todos: missing-todo-link - - # False-positives - "TCH004", # https://github.com/astral-sh/ruff/issues/3821 - - ### - # Specific to this project - ### - "D205", # Not all docstrings have a short description + desrciption - # We have some Pascal case module names - "N999", # pep8-naming: Invalid module name - # Print are used as debug logs - "T20", # flake8-print - # This is a relatively small, low contributors project. Git blame suffice. 
- "TD002", # missing-todo-author - # Python 3.11, introduced "zero cost" exception handling - "PERF203", # try-except-in-loop - - ### FIXME/TODO (no warnings in Ruff yet: https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774): - "CPY001", # flake8-copyright - "PTH", # flake8-use-pathlib - # Ignore until linux support - "EXE", # flake8-executable -] - -[tool.ruff.per-file-ignores] -"typings/**/*.pyi" = [ - "F811", # Re-exports false positives - "F821", # https://github.com/astral-sh/ruff/issues/3011 - # The following can't be controlled for external libraries: - "A", # Shadowing builtin names - "ICN001", # unconventional-import-alias - "N8", # Naming conventions - "PLR0904", # Too many public methods - "PLR0913", # Argument count - "PLR0917", # Too many positional arguments - "PLW3201", # misspelled dunder method name - "PYI042", # CamelCase TypeAlias -] - -# https://docs.astral.sh/ruff/settings/#flake8-implicit-str-concat -[tool.ruff.flake8-implicit-str-concat] -allow-multiline = false - -# https://docs.astral.sh/ruff/settings/#isort -[tool.ruff.isort] -combine-as-imports = true -split-on-trailing-comma = false -# Unlike isort, Ruff only counts relative imports as local-folder by default for know. -# https://github.com/astral-sh/ruff/issues/3115 -known-local-folder = [ - "AutoControlledThread", - "AutoSplit", - "AutoSplitImage", - "capture_method", - "compare", - "error_messages", - "gen", - "hotkeys", - "menu_bar", - "region_selection", - "split_parser", - "user_profile", - "utils", -] - -# https://docs.astral.sh/ruff/settings/#mccabe -[tool.ruff.mccabe] -# Hard limit, arbitrary to 4 bytes -max-complexity = 31 -# Arbitrary to 2 bytes, same as SonarLint -# max-complexity = 15 - -[tool.ruff.pylint] -# Arbitrary to 1 byte, same as SonarLint -max-args = 7 -# At least same as max-complexity -max-branches = 15 - -# https://github.com/hhatto/autopep8#usage -# https://github.com/hhatto/autopep8#more-advanced-usage -[tool.autopep8] -max_line_length = 120 -aggressive = 3 -exclude = ".venv/*,src/gen/*" -ignore = [ - "E124", # Closing bracket may not match multi-line method invocation style (enforced by add-trailing-comma) - "E70", # Allow ... on same line as def - # Autofixed by Ruff - # Check for the "Fix" flag https://docs.astral.sh/ruff/rules/#pycodestyle-e-w - "E2", # Whitespace - "E703", # useless-semicolon - "E71", # Statement (comparisons) - "E731", # lambda-assignment - "W29", # Whitespace warning - "W605", # invalid-escape-sequence - # Autofixed by other Ruff rules - "E401", # I001: unsorted-imports - "W690", # UP: pyupgrade -] - # https://github.com/microsoft/pyright/blob/main/docs/configuration.md#sample-pyprojecttoml-file [tool.pyright] typeCheckingMode = "strict" -pythonVersion = "3.10" +pythonVersion = "3.11" # Prefer `pyright: ignore` enableTypeIgnoreComments = false @@ -155,10 +12,8 @@ enableTypeIgnoreComments = false reportMissingTypeStubs = "warning" # Extra runtime safety reportUnnecessaryComparison = "warning" -# Using Flake8/Ruff instead. Name is already grayed out and red squiggle looks like a mistyped import +# Using Ruff instead. 
Name is already grayed out and red squiggle looks like a mistyped import reportUnusedImport = "none" -# pywin32 has way too many Unknown parameters left -reportUnknownMemberType = "none" ### # Off by default even in strict mode diff --git a/res/settings.ui b/res/settings.ui index 1238e382..3ae4d2f3 100644 --- a/res/settings.ui +++ b/res/settings.ui @@ -7,20 +7,20 @@ 0 0 - 285 - 294 + 284 + 334 - 285 - 294 + 284 + 334 - 285 - 294 + 284 + 334 @@ -41,7 +41,7 @@ -3 -3 291 - 301 + 341 @@ -63,6 +63,9 @@ true + + (Defaults to Split Image Folder) + @@ -85,7 +88,7 @@ 10 40 - 141 + 261 24 @@ -104,7 +107,7 @@ 10 70 - 151 + 261 16 @@ -155,7 +158,7 @@ 10 120 - 151 + 261 16 @@ -207,7 +210,7 @@ 10 220 - 181 + 261 24 @@ -221,6 +224,102 @@ false + + + + 10 + 280 + 131 + 24 + + + + Screenshot on Split + + + false + + + + + + 10 + 240 + 131 + 24 + + + + Screenshot on Start + + + false + + + + + + 10 + 260 + 131 + 24 + + + + Screenshot on Reset + + + false + + + + + + 140 + 260 + 131 + 24 + + + + Screenshot on Skip + + + false + + + + + + 140 + 240 + 131 + 24 + + + + Screenshot on Undo + + + false + + + + + + 140 + 280 + 131 + 24 + + + + Screenshot on Pause + + + false + + diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 00000000..bf73ba1a --- /dev/null +++ b/ruff.toml @@ -0,0 +1,159 @@ +# SPDX-License-Identifier: MIT +# Copyright 2024 Beslogic Inc. + +# The source skeleton for this configuration can be found at +# https://github.com/BesLogic/shared-configs/blob/main/ruff.toml +# Modifications to this file that are not project-specific should also be done upstream. +# These configs are incompatible with ruff<0.5.7 + +# https://docs.astral.sh/ruff/configuration/ +target-version = "py311" # Change this to the oldest supported version by your application +line-length = 100 +preview = true + +[format] +docstring-code-format = true + +[lint] +select = ["ALL"] +# https://docs.astral.sh/ruff/rules/ +ignore = [ + ### + # Not needed or wanted + ### + "D1", # pydocstyle Missing doctring + "D401", # pydocstyle: non-imperative-mood + "EM", # flake8-errmsg + "EXE", # flake8-executable + # This is often something we can't control: https://github.com/astral-sh/ruff/issues/9497 + # Also false-positive with positional-only arguments: https://github.com/astral-sh/ruff/issues/3247 + "FBT003", # flake8-boolean-trap: boolean-positional-value-in-call + "INP", # flake8-no-pep420 + "ISC003", # flake8-implicit-str-concat: explicit-string-concatenation + # Short messages are still considered "long" messages + "TRY003", # tryceratops : raise-vanilla-args + # Don't remove commented code, also too inconsistant + "ERA001", # eradicate: commented-out-code + # contextlib.suppress is roughly 3x slower than try/except + "SIM105", # flake8-simplify: use-contextlib-suppress + # Negative performance impact and more verbose https://github.com/astral-sh/ruff/issues/7871 + "UP038", # non-pep604-isinstance + # Checked by type-checker (pyright/mypy) + "ANN", # flake-annotations + "PGH003", # blanket-type-ignore + "TC", # flake8-type-checking + # Already shown by Pylance, checked by pyright, and can be caused by overloads. 
+ "ARG002", # Unused method argument + # We want D213: multi-line-summary-second-line and D211: no-blank-line-before-class + "D203", # pydocstyle: one-blank-line-before-class + "D212", # pydocstyle: multi-line-summary-first-line + # Allow differentiating between broken (FIXME) and to be done/added/completed (TODO) + "TD001", # flake8-todos: invalid-todo-tag + + ### + # These should be warnings (https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774) + ### + "FIX", # flake8-fixme + # Not all TODOs are worth an issue, this would be better as a warning + "TD003", # flake8-todos: missing-todo-link + + # False-positives + "TC004", # https://github.com/astral-sh/ruff/issues/3821 + + ### + # Conflict with formatter (you can remove this section if you don't use Ruff as a formatter) + ### + "COM812", # missing-trailing-comma + "ISC001", # single-line-implicit-string-concatenation + "RUF028", # invalid-formatter-suppression-comment, Is meant for the formatter, but false-positives + + ### + # Rules about missing special documentation. Up to you if you wanna enable these, you must also disable D406, D407 + ### + "DOC201", # docstring-missing-returns + "DOC402", # docstring-missing-yields + "DOC501", # docstring-missing-exception + # "D406", # new-line-after-section-name, conflicts with DOC + # "D407", # dashed-underline-after-section, conflicts with DOC + + ### + # Specific to this project + ### + "D205", # Not all docstrings have a short description + description + # TODO: Consider for more complete doc + "DOC201", # docstring-extraneous-returns + "DOC501", # docstring-missing-exception + # We have some Pascal case module names + "N999", # pep8-naming: Invalid module name + # Print are used as debug logs + "T20", # flake8-print + # This is a relatively small, low contributors project. Git blame suffice. + "TD002", # missing-todo-author + # Python 3.11, introduced "zero cost" exception handling + "PERF203", # try-except-in-loop + + ### + # FIXME/TODO: I'd normally set them as temporarily warnings, but no warnings in Ruff yet: + # https://github.com/astral-sh/ruff/issues/1256 & https://github.com/astral-sh/ruff/issues/1774): + ### + "CPY001", # flake8-copyright + "PTH", # flake8-use-pathlib + # Ignore until linux support + "EXE", # flake8-executable +] + +# https://docs.astral.sh/ruff/settings/#flake8-implicit-str-concat +[lint.flake8-implicit-str-concat] +allow-multiline = false + +# https://docs.astral.sh/ruff/settings/#isort +[lint.isort] +combine-as-imports = true +split-on-trailing-comma = false +# This should be automatically detected in src layout, but somehow pre-commit messes it up +known-first-party = ["gen"] + +# https://docs.astral.sh/ruff/settings/#mccabe +[lint.mccabe] +# Arbitrary to 2 bytes, same as SonarLint +max-complexity = 15 + +[lint.pylint] +# Arbitrary to 1 byte, same as SonarLint +max-args = 7 +# At least same as max-complexity +max-branches = 15 + +[lint.per-file-ignores] +"**/typings/**/*.pyi" = [ + "F811", # Re-exports false positives + # The following can't be controlled for external libraries: + "A", # Shadowing builtin names + "F403", # `from . 
import *` used; unable to detect undefined names + "FBT", # flake8-boolean-trap + "ICN001", # unconventional-import-alias + "N8", # Naming conventions + "PLC2701", # Private name import + "PLR0904", # Too many public methods + "PLR0913", # Argument count + "PLR0917", # Too many positional arguments + "PLW3201", # misspelled dunder method name + "PYI042", # CamelCase TypeAlias + # Stubs can sometimes re-export entire modules. + # Issues with using a star-imported name will be caught by type-checkers. + "F405", # may be undefined, or defined from star imports +] +"src/d3d11.py" = [ + # Following windows API/ctypes like naming conventions + "N801", # invalid-class-name +] + +[lint.flake8-tidy-imports.banned-api] +"cv2.imread".msg = """\ +it doesn't support special characters. \ +Use `cv2.imdecode(np.fromfile(filename, dtype=np.uint8), flags)` instead. +https://github.com/opencv/opencv/issues/4292#issuecomment-2266019697""" +"cv2.imwrite".msg = """\ +it doesn't support special characters. \ +Use `cv2.imencode(os.path.splitext(filename)[1], img)[1].tofile(filename)` instead. +https://github.com/opencv/opencv/issues/4292#issuecomment-2266019697""" diff --git a/scripts/build.ps1 b/scripts/build.ps1 index 94ebd2e9..3dd62f23 100644 --- a/scripts/build.ps1 +++ b/scripts/build.ps1 @@ -12,12 +12,33 @@ $arguments = @( # if requirements.txt was used directly to help ensure consistency when building locally. # # Installed by PyAutoGUI - '--exclude=pyscreeze', '--exclude=pygetwindow', '--exclude=pymsgbox', '--exclude=pytweening', - '--exclude=mouseinfo', - # Used by imagehash.whash - '--exclude=pywt') + '--exclude=mouseinfo') +if ($IsWindows) { + # These are used on Linux + $arguments += @( + # Installed by PyAutoGUI + '--exclude=pyscreeze' + # Sometimes installed by other automation/image libraries. + # Keep this exclusion even if nothing currently installs it, to stay future-proof. + '--exclude=PIL') +} +if ($IsLinux) { + $arguments += @( + # Required on the CI for PyWinCtl + '--hidden-import pynput.keyboard._xorg', + '--hidden-import pynput.mouse._xorg') +} Start-Process -Wait -NoNewWindow pyinstaller -ArgumentList $arguments + +If ($IsLinux) { + Move-Item -Force $PSScriptRoot/../dist/AutoSplit $PSScriptRoot/../dist/AutoSplit.elf + If ($?) { + Write-Host 'Added .elf extension' + } + chmod +x $PSScriptRoot/../dist/AutoSplit.elf + Write-Host 'Added execute permission' +} diff --git a/scripts/compile_resources.ps1 b/scripts/compile_resources.ps1 index 1a49dd22..57483dd0 100644 --- a/scripts/compile_resources.ps1 +++ b/scripts/compile_resources.ps1 @@ -11,14 +11,15 @@ pyside6-rcc './res/resources.qrc' -o './src/gen/resources_rc.py' $files = Get-ChildItem ./src/gen/ *.py foreach ($file in $files) { (Get-Content $file.PSPath) | - ForEach-Object { $_ -replace 'import resources_rc', 'from . import resources_rc' } | + ForEach-Object { $_.replace('import resources_rc', 'from . 
import resources_rc') } | + ForEach-Object { $_ -replace 'def (\w+?)\(self, (\w+?)\):', 'def $1(self, $2: QWidget):' } | Set-Content $file.PSPath } Write-Host 'Generated code from .ui files' $build_vars_path = "$PSScriptRoot/../src/gen/build_vars.py" If ($Env:GITHUB_EXCLUDE_BUILD_NUMBER -eq $true -or ( - $Env:GITHUB_EVENT_NAME -eq 'push' -and $Env:GITHUB_REF_NAME -eq 'master') + $Env:GITHUB_EVENT_NAME -eq 'push' -and $Env:GITHUB_REF_NAME -eq 'main') ) { $BUILD_NUMBER = '' } @@ -28,7 +29,10 @@ Else { $GITHUB_REPOSITORY = $Env:GITHUB_HEAD_REPOSITORY If (-not $GITHUB_REPOSITORY) { $repo_url = git config --get remote.origin.url - $GITHUB_REPOSITORY = $repo_url.substring(19, $repo_url.length - 19) -replace '\.git', '' + # Validate in case the repo was downloaded rather than cloned + If ($repo_url) { + $GITHUB_REPOSITORY = $repo_url.substring(19, $repo_url.length - 19) -replace '\.git', '' + } } If (-not $GITHUB_REPOSITORY) { $GITHUB_REPOSITORY = 'Toufool/AutoSplit' diff --git a/scripts/designer.ps1 b/scripts/designer.ps1 index a6a159f6..96e14edf 100644 --- a/scripts/designer.ps1 +++ b/scripts/designer.ps1 @@ -1,10 +1,13 @@ +$python = $IsWindows ? 'python' : 'python3' $qt6_applications_import = 'import qt6_applications; print(qt6_applications.__path__[0])' -$qt6_applications_path = python -c $qt6_applications_import + +$qt6_applications_path = &"$python" -c $qt6_applications_import if ($null -eq $qt6_applications_path) { Write-Host 'Designer not found, installing qt6_applications' - python -m pip install qt6_applications + &"$python" -m pip install qt6_applications } -$qt6_applications_path = python -c $qt6_applications_import + +$qt6_applications_path = &"$python" -c $qt6_applications_import & "$qt6_applications_path/Qt/bin/designer" ` "$PSScriptRoot/../res/design.ui" ` "$PSScriptRoot/../res/about.ui" ` diff --git a/scripts/install.ps1 b/scripts/install.ps1 index 140af292..ef55a7ea 100644 --- a/scripts/install.ps1 +++ b/scripts/install.ps1 @@ -1,35 +1,61 @@ +$python = $IsWindows ? 'python' : 'python3' + +# Validating user groups on Linux +If ($IsLinux) { + $groups = groups + if ($groups.Contains('input') -and $groups.Contains('tty')) { + Write-Host "User $Env:USER is already part of groups input and tty. No actions taken." + } + Else { + # https://github.com/boppreh/keyboard/issues/312#issuecomment-1189734564 + Write-Host "User $Env:USER isn't part of groups input and tty. It is required to install the keyboard module." + # Keep in sync with README.md and src/error_messages.py + sudo usermod -a -G 'tty,input' $Env:USER + sudo touch /dev/uinput + sudo chmod +0666 /dev/uinput + If (-not $Env:GITHUB_JOB) { + Write-Output 'KERNEL=="uinput", TAG+="uaccess""' | sudo tee /etc/udev/rules.d/50-uinput.rules + Write-Output 'SUBSYSTEM=="input", MODE="0666" GROUP="plugdev"' | sudo tee /etc/udev/rules.d/12-input.rules + Write-Output 'SUBSYSTEM=="misc", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules + Write-Output 'SUBSYSTEM=="tty", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules + } + Write-Host 'You have been added automatically,' ` + "but still need to manually terminate your session with 'loginctl terminate-user $Env:USER'" ` + 'for the changes to take effect outside of this script.' 
+ If (-not $Env:GITHUB_JOB) { + Write-Host -NoNewline 'Press any key to continue...'; + $null = $Host.UI.RawUI.ReadKey('NoEcho,IncludeKeyDown'); + } + } +} + # Installing Python dependencies $dev = If ($Env:GITHUB_JOB -eq 'Build') { '' } Else { '-dev' } +If ($IsLinux) { + If (-not $Env:GITHUB_JOB -or $Env:GITHUB_JOB -eq 'Build') { + sudo apt-get update + # python3-tk for splash screen, libxcb-cursor-dev for QT_QPA_PLATFORM=xcb, the rest for PySide6 + sudo apt-get install -y python3-pip python3-tk libxcb-cursor-dev libegl1 libxkbcommon0 + # having issues with npm for pyright, maybe let users take care of it themselves? (pyright from pip) + } +} # Ensures installation tools are up to date. This also aliases pip to pip3 on MacOS. -python -m pip install wheel pip setuptools --upgrade -pip install -r "$PSScriptRoot/requirements$dev.txt" --upgrade +&"$python" -m pip install wheel pip setuptools --upgrade +# Upgrading QT to 6.6.2 w/o first uninstalling shiboken6 can lead to issues +# https://bugreports.qt.io/browse/PYSIDE-2616?focusedId=777285&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-777285 +&"$python" -m pip uninstall shiboken6 -y +&"$python" -m pip install -r "$PSScriptRoot/requirements$dev.txt" --upgrade # These libraries install extra requirements we don't want # Open suggestion for support in requirements files: https://github.com/pypa/pip/issues/9948 & https://github.com/pypa/pip/pull/10837 # PyAutoGUI: We only use it for hotkeys -# ImageHash: uneeded + broken on Python 3.12 PyWavelets install -# scipy: needed for ImageHash -pip install PyAutoGUI ImageHash scipy --no-deps --upgrade - -# Patch libraries so we don't have to install from git +&"$python" -m pip install PyAutoGUI --no-deps --upgrade -# Prevent PyAutoGUI and pywinctl from setting Process DPI Awareness, which Qt tries to do then throws warnings about it. -# The unittest workaround significantly increases build time, boot time and build size with PyInstaller. 
-# https://github.com/asweigart/pyautogui/issues/663#issuecomment-1296719464 -$libPath = python -c 'import pyautogui as _; print(_.__path__[0])' -(Get-Content "$libPath/_pyautogui_win.py").replace('ctypes.windll.user32.SetProcessDPIAware()', 'pass') | - Set-Content "$libPath/_pyautogui_win.py" -$libPath = python -c 'import pymonctl as _; print(_.__path__[0])' -(Get-Content "$libPath/_pymonctl_win.py").replace('ctypes.windll.shcore.SetProcessDpiAwareness(2)', 'pass') | - Set-Content "$libPath/_pymonctl_win.py" -$libPath = python -c 'import pywinbox as _; print(_.__path__[0])' -(Get-Content "$libPath/_pywinbox_win.py").replace('ctypes.windll.shcore.SetProcessDpiAwareness(2)', 'pass') | - Set-Content "$libPath/_pywinbox_win.py" # Uninstall optional dependencies if PyAutoGUI was installed outside this script -# pyscreeze -> pyscreenshot -> mss deps call SetProcessDpiAwareness -# pygetwindow, pymsgbox, pytweening, MouseInfo are picked up by PySide6 +# PyScreeze -> pyscreenshot -> mss deps call SetProcessDpiAwareness, used to be installed on Windows +# pygetwindow, pymsgbox, pytweening, MouseInfo are picked up by PyInstaller # (also --exclude from build script, but more consistent with unfrozen run) -python -m pip uninstall pyscreeze pyscreenshot mss pygetwindow pymsgbox pytweening MouseInfo -y - +&"$python" -m pip uninstall pyscreenshot mss pygetwindow pymsgbox pytweening MouseInfo -y +If ($IsWindows) { &"$python" -m pip uninstall PyScreeze -y } # Don't compile resources on the Build CI job as it'll do so in build script If ($dev) { diff --git a/scripts/lint.ps1 b/scripts/lint.ps1 index 84dc6f8b..2049ef13 100644 --- a/scripts/lint.ps1 +++ b/scripts/lint.ps1 @@ -2,12 +2,8 @@ $originalDirectory = $pwd Set-Location "$PSScriptRoot/.." $exitCodes = 0 -Write-Host "`nRunning formatting..." -autopep8 src/ --recursive --in-place -add-trailing-comma $(git ls-files '**.py*') - -Write-Host "`nRunning Ruff..." -ruff check . --fix +Write-Host "`nRunning Ruff ..." +ruff check --fix $exitCodes += $LastExitCode if ($LastExitCode -gt 0) { Write-Host "`Ruff failed ($LastExitCode)" -ForegroundColor Red @@ -16,12 +12,19 @@ else { Write-Host "`Ruff passed" -ForegroundColor Green } -Write-Host "`nRunning Pyright..." -$Env:PYRIGHT_PYTHON_FORCE_VERSION = 'latest' -npx pyright@latest src/ +Write-Host "`nRunning formatting..." +ruff format + +$pyrightVersion = 'latest' # Change this if latest has issues +Write-Host "`nRunning Pyright $pyrightVersion ..." +$Env:PYRIGHT_PYTHON_FORCE_VERSION = $pyrightVersion +npx -y pyright@$pyrightVersion src/ $exitCodes += $LastExitCode if ($LastExitCode -gt 0) { Write-Host "`Pyright failed ($LastExitCode)" -ForegroundColor Red + if ($pyrightVersion -eq 'latest') { + npx pyright@latest --version + } } else { Write-Host "`Pyright passed" -ForegroundColor Green diff --git a/scripts/python_build_from_source_linux.bash b/scripts/python_build_from_source_linux.bash new file mode 100644 index 00000000..7e9fb839 --- /dev/null +++ b/scripts/python_build_from_source_linux.bash @@ -0,0 +1,28 @@ +cd .. 
+ +# Update package lists +sudo apt update + +# Install dependent libraries: +sudo apt install build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libsqlite3-dev libreadline-dev libffi-dev curl libbz2-dev tk-dev + +# Download Python binary package: +wget https://www.python.org/ftp/python/3.11.10/Python-3.11.10.tgz + +# Unzip the package: +tar -xzf Python-3.11.10.tgz + +# Execute configure script +cd Python-3.11.10 +./configure --enable-optimizations --enable-shared + +# Build Python 3.11 +make -j 2 + +# Install Python 3.11 +sudo make install + +# Verify the installation +python3.11 -V + +echo "If Python version did not print, you may need to stop active processes" diff --git a/scripts/requirements-dev.txt b/scripts/requirements-dev.txt index 6d233d8e..51cec95d 100644 --- a/scripts/requirements-dev.txt +++ b/scripts/requirements-dev.txt @@ -12,17 +12,14 @@ -r requirements.txt # # Linters & Formatters -add-trailing-comma>=3.1.0 # Must match .pre-commit-config.yaml -autopep8>=2.0.4 # Must match .pre-commit-config.yaml -ruff>=0.1.7 # New checks # Must match .pre-commit-config.yaml +ruff>=0.8.5 # # Types -types-D3DShot ; sys_platform == 'win32' +scipy-stubs>=1.14.1.1 types-keyboard -types-Pillow types-psutil types-PyAutoGUI types-pyinstaller -types-pywin32 ; sys_platform == 'win32' -types-requests +types-python-xlib ; sys_platform == 'linux' +types-pywin32>=306.0.0.20240130 ; sys_platform == 'win32' types-toml diff --git a/scripts/requirements.txt b/scripts/requirements.txt index e01f9fcc..fc0d1726 100644 --- a/scripts/requirements.txt +++ b/scripts/requirements.txt @@ -3,30 +3,42 @@ # Read /docs/build%20instructions.md for more information on how to install, run and build the python code. # # Dependencies: -certifi -ImageHash>=4.3.1 ; python_version < '3.12' # Contains type information + setup as package not module # PyWavelets install broken on Python 3.12 git+https://github.com/boppreh/keyboard.git#egg=keyboard # Fix install on macos and linux-ci https://github.com/boppreh/keyboard/pull/568 -numpy>=1.26 # Python 3.12 support -opencv-python-headless>=4.8.1.78 # Typing fixes +Levenshtein>=0.25 +numpy>=2.1 # Python 3.13 support +opencv-python-headless>=4.10 # NumPy 2 support packaging -Pillow>=10.0 # Python 3.12 support -psutil>=5.9.6 # Python 3.12 fixes -PyAutoGUI +psutil>=6.0.0 # Python 3.13 support +# PyAutoGUI # See install.ps1 PyWinCtl>=0.0.42 # py.typed # When needed, dev builds can be found at https://download.qt.io/snapshots/ci/pyside/dev?C=M;O=D -PySide6-Essentials>=6.6.0 # Python 3.12 support -requests>=2.28.2 # charset_normalizer 3.x update -toml +PySide6-Essentials>=6.8.2 # Fixed typing issue with QMessageBox.warning +scipy>=1.14.1 # Python 3.13 support +tomli-w>=1.1.0 # Typing fixes typing-extensions>=4.4.0 # @override decorator support + # # Build and compile resources -pyinstaller>=5.13 # Python 3.12 support -pyinstaller-hooks-contrib>=2022.15 # charset-normalizer fix https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/534 +pyinstaller>=6.10.0 # Python 3.13 support + # # https://peps.python.org/pep-0508/#environment-markers # # Windows-only dependencies: pygrabber>=0.2 ; sys_platform == 'win32' # Completed types -pywin32>=301 ; sys_platform == 'win32' -winsdk>=1.0.0b10 ; sys_platform == 'win32' # Python 3.12 support -git+https://github.com/ranchen421/D3DShot.git#egg=D3DShot ; sys_platform == 'win32' # D3DShot from PyPI with Pillow>=7.2.0 will install 0.1.3 instead of 0.1.5 +pywin32>=307 ; sys_platform == 'win32' # Python 3.13 
support +typed-D3DShot[numpy]>=1.0.1 ; sys_platform == 'win32' +winrt-Windows.Foundation>=2.2.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.Capture>=2.3.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.Capture.Interop>=2.3.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.DirectX>=2.3.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.DirectX.Direct3D11>=2.3.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.DirectX.Direct3D11.Interop>=2.3.0 ; sys_platform == 'win32' +winrt-Windows.Graphics>=2.2.0 ; sys_platform == 'win32' # Python 3.13 support +winrt-Windows.Graphics.Imaging>=2.3.0 ; sys_platform == 'win32' # Python 3.13 support + +# +# Linux-only dependencies +PyScreeze ; sys_platform == 'linux' +pillow>=11.0 ; sys_platform == 'linux' # Python 3.13 support # Necessary for PyScreeze. For unknown reasons it's not pulled in on CI +python-xlib ; sys_platform == 'linux' diff --git a/scripts/start.ps1 b/scripts/start.ps1 index 70d6fd8b..55657460 100644 --- a/scripts/start.ps1 +++ b/scripts/start.ps1 @@ -1,3 +1,4 @@ param ([string]$p1) & "$PSScriptRoot/compile_resources.ps1" -python "$PSScriptRoot/../src/AutoSplit.py" $p1 +$python = $IsWindows ? 'python' : 'python3' +&"$python" "$PSScriptRoot/../src/AutoSplit.py" $p1 diff --git a/src/AutoControlledThread.py b/src/AutoControlledThread.py index e61cb118..416d3529 100644 --- a/src/AutoControlledThread.py +++ b/src/AutoControlledThread.py @@ -24,6 +24,8 @@ def run(self): break except EOFError: continue + if line in self._autosplit_ref.settings_dict["screenshot_on"]: + self._autosplit_ref.screenshot_signal.emit() match line: # This is for use in a Development environment case "kill": diff --git a/src/AutoSplit.py b/src/AutoSplit.py index 3fd27f1b..a39a743f 100644 --- a/src/AutoSplit.py +++ b/src/AutoSplit.py @@ -1,14 +1,36 @@ #!/usr/bin/python3 import os -import signal import sys + +# Prevent PyAutoGUI and pywinctl from setting Process DPI Awareness, +# which Qt tries to do then throws warnings about it. +# The unittest workaround significantly increases +# build time, boot time and build size with PyInstaller. +# https://github.com/asweigart/pyautogui/issues/663#issuecomment-1296719464 +# QT doesn't call those from Python/ctypes, meaning we can stop other programs from setting it. 
+if sys.platform == "win32": + import ctypes + + # pyautogui._pyautogui_win.py + ctypes.windll.user32.SetProcessDPIAware = ( # pyright: ignore[reportAttributeAccessIssue] + lambda: None + ) + # pymonctl._pymonctl_win.py + # pywinbox._pywinbox_win.py + ctypes.windll.shcore.SetProcessDpiAwareness = ( # pyright: ignore[reportAttributeAccessIssue] + lambda _: None # pyright: ignore[reportUnknownLambdaType] + ) +if sys.platform == "linux": + # Fixes "undefined symbol: wl_proxy_marshal_flags": https://bugreports.qt.io/browse/QTBUG-114635 + os.environ.setdefault("QT_QPA_PLATFORM", "xcb") + +import signal from collections.abc import Callable from copy import deepcopy from time import time from types import FunctionType from typing import NoReturn -import certifi import cv2 from cv2.typing import MatLike from psutil import process_iter @@ -16,7 +38,6 @@ from PySide6.QtTest import QTest from PySide6.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow, QMessageBox from typing_extensions import override -from win32comext.shell import shell as shell32 import error_messages import user_profile @@ -24,7 +45,13 @@ from AutoSplitImage import START_KEYWORD, AutoSplitImage, ImageType from capture_method import CaptureMethodBase, CaptureMethodEnum from gen import about, design, settings, update_checker -from hotkeys import HOTKEYS, after_setting_hotkey, send_command +from hotkeys import ( + HOTKEYS, + KEYBOARD_GROUPS_ISSUE, + KEYBOARD_UINPUT_ISSUE, + after_setting_hotkey, + send_command, +) from menu_bar import ( about_qt, about_qt_for_python, @@ -35,27 +62,36 @@ open_update_checker, view_help, ) -from region_selection import align_region, select_region, select_window, validate_before_parsing -from split_parser import BELOW_FLAG, DUMMY_FLAG, PAUSE_FLAG, parse_and_validate_images +from region_selection import align_region, select_region, select_window +from split_parser import ( + BELOW_FLAG, + DUMMY_FLAG, + PAUSE_FLAG, + parse_and_validate_images, + validate_before_parsing, +) from user_profile import DEFAULT_PROFILE from utils import ( AUTOSPLIT_VERSION, BGRA_CHANNEL_COUNT, FROZEN, ONE_SECOND, + RUNNING_WAYLAND, auto_split_directory, decimal, flatten, + imwrite, is_valid_image, open_file, ) CHECK_FPS_ITERATIONS = 10 -# Needed when compiled, along with the custom hook-requests PyInstaller hook -os.environ["REQUESTS_CA_BUNDLE"] = certifi.where() -myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}" -shell32.SetCurrentProcessExplicitAppUserModelID(myappid) +if sys.platform == "win32": + from win32comext.shell import shell as shell32 + + myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}" + shell32.SetCurrentProcessExplicitAppUserModelID(myappid) class AutoSplit(QMainWindow, design.Ui_MainWindow): @@ -71,7 +107,7 @@ class AutoSplit(QMainWindow, design.Ui_MainWindow): screenshot_signal = QtCore.Signal() after_setting_hotkey_signal = QtCore.Signal() update_checker_widget_signal = QtCore.Signal(str, bool) - load_start_image_signal = QtCore.Signal(bool, bool) + reload_start_image_signal = QtCore.Signal(bool, bool) # Use this signal when trying to show an error from outside the main thread show_error_signal = QtCore.Signal(FunctionType) @@ -127,8 +163,8 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]): self.setupUi(self) self.setWindowTitle( - f"AutoSplit v{AUTOSPLIT_VERSION}" + - (" (externally controlled)" if self.is_auto_controlled else ""), + f"AutoSplit v{AUTOSPLIT_VERSION}" + + (" (externally controlled)" if self.is_auto_controlled else "") ) # Hotkeys need to be initialized 
to be passed as thread arguments in hotkeys.py @@ -173,13 +209,20 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]): self.reset_button.clicked.connect(self.reset) self.skip_split_button.clicked.connect(self.skip_split) self.undo_split_button.clicked.connect(self.undo_split) - self.next_image_button.clicked.connect(lambda: self.skip_split(True)) - self.previous_image_button.clicked.connect(lambda: self.undo_split(True)) + self.next_image_button.clicked.connect(lambda: self.skip_split(navigate_image_only=True)) + self.previous_image_button.clicked.connect( + lambda: self.undo_split(navigate_image_only=True) + ) self.align_region_button.clicked.connect(lambda: align_region(self)) self.select_window_button.clicked.connect(lambda: select_window(self)) - self.reload_start_image_button.clicked.connect(lambda: self.__load_start_image(True, True)) + self.reload_start_image_button.clicked.connect( + lambda: self.__reload_start_image(started_by_button=True) + ) self.action_check_for_updates_on_open.changed.connect( - lambda: user_profile.set_check_for_updates_on_open(self, self.action_check_for_updates_on_open.isChecked()), + lambda: user_profile.set_check_for_updates_on_open( + self, + self.action_check_for_updates_on_open.isChecked(), + ), ) # update x, y, width, and height when changing the value of these spinbox's are changed @@ -192,12 +235,11 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]): self.after_setting_hotkey_signal.connect(lambda: after_setting_hotkey(self)) self.start_auto_splitter_signal.connect(self.__auto_splitter) - def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool): - return open_update_checker(self, latest_version, check_on_open) + def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool): # noqa: FBT001 + return open_update_checker(self, latest_version, check_on_open=check_on_open) self.update_checker_widget_signal.connect(_update_checker_widget_signal_slot) - - self.load_start_image_signal.connect(self.__load_start_image) + self.reload_start_image_signal.connect(self.__reload_start_image) self.reset_signal.connect(self.reset) self.skip_split_signal.connect(self.skip_split) self.undo_split_signal.connect(self.undo_split) @@ -205,11 +247,13 @@ def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool) self.screenshot_signal.connect(self.__take_screenshot) # live image checkbox - self.timer_live_image.timeout.connect(lambda: self.__update_live_image_details(None, True)) + self.timer_live_image.timeout.connect( + lambda: self.__update_live_image_details(None, called_from_timer=True) + ) self.timer_live_image.start(int(ONE_SECOND / self.settings_dict["fps_limit"])) # Automatic timer start - self.timer_start_image.timeout.connect(self.__start_image_function) + self.timer_start_image.timeout.connect(self.__compare_capture_for_auto_start) self.show() @@ -242,9 +286,14 @@ def __browse(self): # set the split image folder line to the directory text self.settings_dict["split_image_directory"] = new_split_image_directory self.split_image_folder_input.setText(f"{new_split_image_directory}/") - self.load_start_image_signal.emit(False, True) - - def __update_live_image_details(self, capture: MatLike | None, called_from_timer: bool = False): + self.reload_start_image_signal.emit(False, True) + + def __update_live_image_details( + self, + capture: MatLike | None, + *, + called_from_timer: bool = False, + ): # HACK: Since this is also called in __get_capture_for_comparison, # 
we don't need to update anything if the app is running
        if called_from_timer:
@@ -267,14 +316,31 @@ def __update_live_image_details(self, capture: MatLike | None, called_from_timer
         else:
             set_preview_image(self.live_image, capture)

-    def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bool = True):
-        """Not thread safe (if triggered by LiveSplit for example). Use `load_start_image_signal.emit` instead."""
+    def __reload_start_image(self, *, started_by_button: bool = False, wait_for_delay: bool = True):
+        """
+        Not thread safe (if triggered by LiveSplit for example).
+        Use `reload_start_image_signal.emit` instead.
+
+        1. Stops the automated start check and clears the current Split Image.
+        2. Reloads the Start Image from disk and validates it.
+        3. If validation passes:
+        -
+        - Updates the shown Split Image and Start Image text
+        - Reinitialise values
+        - Restart the automated start check
+        """
+        if self.is_running:
+            raise RuntimeError("Start Image should never be reloaded whilst running!")
+
         self.timer_start_image.stop()
         self.current_image_file_label.setText("-")
         self.start_image_status_value_label.setText("not found")
         set_preview_image(self.current_split_image, None)

-        if not (validate_before_parsing(self, started_by_button) and parse_and_validate_images(self)):
+        if not (
+            validate_before_parsing(self, show_error=started_by_button)
+            and parse_and_validate_images(self)
+        ):
             QApplication.processEvents()
             return

@@ -284,8 +350,6 @@ def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bo
             QApplication.processEvents()
             return

-        self.split_image_number = 0
-
         if not wait_for_delay and self.start_image.get_pause_time(self) > 0:
             self.start_image_status_value_label.setText("paused")
             self.table_current_image_highest_label.setText("-")
@@ -294,16 +358,18 @@
             self.start_image_status_value_label.setText("ready")
             self.__update_split_image(self.start_image)

+        self.split_image_number = 0
         self.highest_similarity = 0.0
         self.reset_highest_similarity = 0.0
         self.split_below_threshold = False
-        self.timer_start_image.start(int(ONE_SECOND / self.settings_dict["fps_limit"]))
+
+        self.timer_start_image.start(int(ONE_SECOND / self.start_image.get_fps_limit(self)))

         QApplication.processEvents()

-    def __start_image_function(self):
+    def __compare_capture_for_auto_start(self):
         if not self.start_image:
-            return
+            raise ValueError("There is no Start Image. How did we even get here?")

         self.start_image_status_value_label.setText("ready")
         self.__update_split_image(self.start_image)
@@ -313,27 +379,29 @@
         start_image_similarity = self.start_image.compare_with_capture(self, capture)
         # If the similarity becomes higher than highest similarity, set it as such.
- if start_image_similarity > self.highest_similarity: - self.highest_similarity = start_image_similarity + self.highest_similarity = max(start_image_similarity, self.highest_similarity) self.table_current_image_live_label.setText(decimal(start_image_similarity)) self.table_current_image_highest_label.setText(decimal(self.highest_similarity)) self.table_current_image_threshold_label.setText(decimal(start_image_threshold)) - # If the {b} flag is set, let similarity go above threshold first, then split on similarity below threshold + # If the {b} flag is set, let similarity go above threshold first, + # then split on similarity below threshold # Otherwise just split when similarity goes above threshold # TODO: Abstract with similar check in split image below_flag = self.start_image.check_flag(BELOW_FLAG) - # Negative means belove threshold, positive means above + # Negative means below threshold, positive means above similarity_diff = start_image_similarity - start_image_threshold if below_flag and not self.split_below_threshold and similarity_diff >= 0: self.split_below_threshold = True return - if ( - (below_flag and self.split_below_threshold and similarity_diff < 0 and is_valid_image(capture)) # noqa: PLR0916 # See above TODO - or (not below_flag and similarity_diff >= 0) - ): + if ( # noqa: PLR0916 # See above TODO + below_flag + and self.split_below_threshold + and similarity_diff < 0 + and is_valid_image(capture) + ) or (not below_flag and similarity_diff >= 0): self.timer_start_image.stop() self.split_below_threshold = False @@ -347,7 +415,7 @@ def __start_image_function(self): while time_delta < start_delay: delay_time_left = start_delay - time_delta self.current_split_image.setText( - f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}", + f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}" ) # Wait 0.1s. Doesn't need to be shorter as we only show 1 decimal QTest.qWait(100) @@ -371,7 +439,19 @@ def __update_height(self): self.settings_dict["capture_region"]["height"] = self.height_spinbox.value() def __take_screenshot(self): - if not validate_before_parsing(self, check_empty_directory=False): + if not self.capture_method.check_selected_region_exists(): + error_messages.region() + return + + screenshot_directory = ( + self.settings_dict["screenshot_directory"] + or self.settings_dict["split_image_directory"] + ) + if not screenshot_directory: + error_messages.split_image_directory() + return + if not os.path.exists(screenshot_directory): + error_messages.invalid_directory(screenshot_directory) return # Check if file exists and rename it if it does. @@ -380,7 +460,7 @@ def __take_screenshot(self): screenshot_index = 1 while True: screenshot_path = os.path.join( - self.settings_dict["screenshot_directory"] or self.settings_dict["split_image_directory"], + screenshot_directory, f"{screenshot_index:03}_SplitImage.png", ) if not os.path.exists(screenshot_path): @@ -394,7 +474,7 @@ def __take_screenshot(self): return # Save and open image - cv2.imwrite(screenshot_path, capture) + imwrite(screenshot_path, capture) if self.settings_dict["open_screenshot"]: open_file(screenshot_path) @@ -435,7 +515,7 @@ def __is_current_split_out_of_range(self): or self.split_image_number > len(self.split_images_and_loop_number) - 1 ) - def undo_split(self, navigate_image_only: bool = False): + def undo_split(self, *, navigate_image_only: bool = False): """Undo Split" and "Prev. Img." 
buttons connect to here.""" # Can't undo until timer is started # or Undoing past the first image @@ -459,14 +539,18 @@ def undo_split(self, navigate_image_only: bool = False): if not navigate_image_only: send_command(self, "undo") - def skip_split(self, navigate_image_only: bool = False): + def skip_split(self, *, navigate_image_only: bool = False): """Skip Split" and "Next Img." buttons connect to here.""" # Can't skip or split until timer is started # or Splitting/skipping when there are no images left if ( not self.is_running or "Delayed Split" in self.current_split_image.text() - or not (self.skip_split_button.isEnabled() or self.is_auto_controlled or navigate_image_only) + or not ( + self.skip_split_button.isEnabled() # fmt: skip + or self.is_auto_controlled + or navigate_image_only + ) or self.__is_current_split_out_of_range() ): return @@ -494,10 +578,12 @@ def reset(self): """ self.is_running = False - # Functions for the hotkeys to return to the main thread from signals and start their corresponding functions + # Functions for the hotkeys to return to the main thread from signals + # and start their corresponding functions def start_auto_splitter(self): - # If the auto splitter is already running or the button is disabled, don't emit the signal to start it. - if ( + # If the auto splitter is already running or the button is disabled, + # don't emit the signal to start it. + if ( # fmt: skip self.is_running or (not self.start_auto_splitter_button.isEnabled() and not self.is_auto_controlled) ): @@ -512,13 +598,13 @@ def start_auto_splitter(self): def __check_for_reset_state_update_ui(self): """Check if AutoSplit is started, if not then update the GUI.""" if not self.is_running: - self.gui_changes_on_reset(True) + self.gui_changes_on_reset(safe_to_reload_start_image=True) return True return False - def __auto_splitter(self): # noqa: PLR0912,PLR0915 + def __auto_splitter(self): # noqa: C901,PLR0912,PLR0915 if not self.settings_dict["split_hotkey"] and not self.is_auto_controlled: - self.gui_changes_on_reset(True) + self.gui_changes_on_reset(safe_to_reload_start_image=True) error_messages.split_hotkey() return @@ -526,18 +612,18 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 self.run_start_time = time() if not (validate_before_parsing(self) and parse_and_validate_images(self)): - # `safe_to_reload_start_image: bool = False` because __load_start_image also does this check, + # `safe_to_reload_start_image: bool = False` + # because __reload_start_image also does this check, # we don't want to double a Start/Reset Image error message - self.gui_changes_on_reset(False) + self.gui_changes_on_reset() return # Construct a list of images + loop count tuples. 
self.split_images_and_loop_number = list( flatten( ((split_image, i + 1) for i in range(split_image.loops)) - for split_image - in self.split_images - ), + for split_image in self.split_images + ) ) # Construct groups of splits @@ -571,7 +657,7 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 while self.split_image_number < number_of_split_images: # Check if we are not waiting for the split delay to send the key press if self.waiting_for_split_delay: - time_millis = int(round(time() * ONE_SECOND)) + time_millis = round(time() * ONE_SECOND) if time_millis < split_time: QApplication.processEvents() continue @@ -604,7 +690,8 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 button.setEnabled(False) self.current_image_file_label.clear() - # check for reset while delayed and display a counter of the remaining split delay time + # check for reset while delayed and + # display a counter of the remaining split delay time if self.__pause_loop(split_delay, "Delayed Split:"): return @@ -618,22 +705,30 @@ def __auto_splitter(self): # noqa: PLR0912,PLR0915 # if loop check box is checked and its the last split, go to first split. # else go to the next split image. - if self.settings_dict["loop_splits"] and self.split_image_number == number_of_split_images - 1: + if ( + self.settings_dict["loop_splits"] + and self.split_image_number == number_of_split_images - 1 + ): self.split_image_number = 0 else: self.split_image_number += 1 # If its not the last split image, pause for the amount set by the user # A pause loop to check if the user presses skip split, undo split, or reset here. - # Also updates the current split image text, counting down the time until the next split image + # Also updates the current split image text, + # counting down the time until the next split image if self.__pause_loop(self.split_image.get_pause_time(self), "None (Paused)."): return # loop breaks to here when the last image splits self.is_running = False - self.gui_changes_on_reset(True) + self.gui_changes_on_reset(safe_to_reload_start_image=True) - def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_array: list[bool]): + def __similarity_threshold_loop( + self, + number_of_split_images: int, + dummy_splits_array: list[bool], + ): """ Wait until the similarity threshold is met. @@ -656,8 +751,7 @@ def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_ self.table_current_image_live_label.setText(decimal(similarity)) # if the similarity becomes higher than highest similarity, set it as such. 
- if similarity > self.highest_similarity: - self.highest_similarity = similarity + self.highest_similarity = max(similarity, self.highest_similarity) # show live highest similarity if the checkbox is checked self.table_current_image_highest_label.setText(decimal(self.highest_similarity)) @@ -667,13 +761,16 @@ def __similarity_threshold_loop(self, number_of_split_images: int, dummy_splits_ self.next_image_button.setEnabled(self.split_image_number != number_of_split_images - 1) self.previous_image_button.setEnabled(self.split_image_number != 0) if not self.is_auto_controlled: - # If its the last non-dummy split image and last loop number, disable the skip split button - self.skip_split_button.setEnabled(dummy_splits_array[self.split_image_number :].count(False) > 1) + # If its the last non-dummy split image and last loop number, + # disable the skip split button + self.skip_split_button.setEnabled( + dummy_splits_array[self.split_image_number :].count(False) > 1 + ) self.undo_split_button.setEnabled(self.split_image_number != 0) QApplication.processEvents() # Limit the number of time the comparison runs to reduce cpu usage - frame_interval = 1 / self.settings_dict["fps_limit"] + frame_interval = 1 / self.split_image.get_fps_limit(self) # Use a time delta to have a consistant check interval wait_delta_ms = int((frame_interval - (time() - start) % frame_interval) * ONE_SECOND) @@ -728,7 +825,9 @@ def __pause_loop(self, stop_time: float, message: str): ): break - self.current_split_image.setText(f"{message} {seconds_remaining_text(stop_time - time_delta)}") + self.current_split_image.setText( + f"{message} {seconds_remaining_text(stop_time - time_delta)}" + ) QTest.qWait(1) return False @@ -755,7 +854,7 @@ def gui_changes_on_start(self): QApplication.processEvents() - def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False): + def gui_changes_on_reset(self, *, safe_to_reload_start_image: bool = False): self.start_auto_splitter_button.setText("Start Auto Splitter") self.image_loop_value_label.setText("N/A") self.current_split_image.clear() @@ -785,7 +884,7 @@ def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False): QApplication.processEvents() if safe_to_reload_start_image: - self.load_start_image_signal.emit(False, False) + self.reload_start_image_signal.emit(False, False) def __get_capture_for_comparison(self): """Grab capture region and resize for comparison.""" @@ -802,7 +901,9 @@ def __get_capture_for_comparison(self): if self.settings_dict["capture_method"] == CaptureMethodEnum.BITBLT: message += "\n(captured window may be incompatible with BitBlt)" self.live_image.setText(message) - recovered = self.capture_method.recover_window(self.settings_dict["captured_window_title"]) + recovered = self.capture_method.recover_window( + self.settings_dict["captured_window_title"] + ) if recovered: capture = self.capture_method.get_frame() @@ -825,9 +926,10 @@ def __reset_if_should(self, capture: MatLike | None): self.table_reset_image_live_label.setText("paused") else: should_reset = similarity >= threshold - if similarity > self.reset_highest_similarity: - self.reset_highest_similarity = similarity - self.table_reset_image_highest_label.setText(decimal(self.reset_highest_similarity)) + self.reset_highest_similarity = max(similarity, self.reset_highest_similarity) + self.table_reset_image_highest_label.setText( + decimal(self.reset_highest_similarity) + ) self.table_reset_image_live_label.setText(decimal(similarity)) 
self.table_reset_image_threshold_label.setText(decimal(threshold)) @@ -856,12 +958,21 @@ def __update_split_image(self, specific_image: AutoSplitImage | None = None): return # Get split image - self.split_image = specific_image or self.split_images_and_loop_number[0 + self.split_image_number][0] - if is_valid_image(self.split_image.byte_array): + self.split_image = ( + specific_image # fmt: skip + or self.split_images_and_loop_number[0 + self.split_image_number][0] + ) + if self.split_image.is_ocr: + # TODO: test if setText clears a set image + text = "\nor\n".join(self.split_image.texts) + self.current_split_image.setText(f"Looking for OCR text:\n{text}") + elif is_valid_image(self.split_image.byte_array): set_preview_image(self.current_split_image, self.split_image.byte_array) self.current_image_file_label.setText(self.split_image.filename) - self.table_current_image_threshold_label.setText(decimal(self.split_image.get_similarity_threshold(self))) + self.table_current_image_threshold_label.setText( + decimal(self.split_image.get_similarity_threshold(self)) + ) # Set Image Loop number if specific_image and specific_image.image_type == ImageType.START: @@ -904,7 +1015,11 @@ def exit_program() -> NoReturn: self, "AutoSplit", f"Do you want to save changes made to settings file {settings_file_name}?", - QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No | QMessageBox.StandardButton.Cancel, + ( + QMessageBox.StandardButton.Yes + | QMessageBox.StandardButton.No + | QMessageBox.StandardButton.Cancel + ), ) if warning is QMessageBox.StandardButton.Yes: @@ -932,19 +1047,13 @@ def set_preview_image(qlabel: QLabel, image: MatLike | None): image_format = QtGui.QImage.Format.Format_BGR888 capture = image - qimage = QtGui.QImage( - capture.data, # pyright: ignore[reportGeneralTypeIssues] # https://bugreports.qt.io/browse/PYSIDE-2476 - width, - height, - width * channels, - image_format, - ) + qimage = QtGui.QImage(capture.data, width, height, width * channels, image_format) qlabel.setPixmap( QtGui.QPixmap(qimage).scaled( qlabel.size(), QtCore.Qt.AspectRatioMode.IgnoreAspectRatio, QtCore.Qt.TransformationMode.SmoothTransformation, - ), + ) ) @@ -952,6 +1061,7 @@ def seconds_remaining_text(seconds: float): return f"{seconds:.1f} second{'' if 0 < seconds <= 1 else 's'} remaining" +# TODO: Add Linux support def is_already_open(): # When running directly in Python, any AutoSplit process means it's already open # When bundled, we must ignore itself and the splash screen @@ -976,6 +1086,12 @@ def main(): if is_already_open(): error_messages.already_open() + if KEYBOARD_GROUPS_ISSUE: + error_messages.linux_groups() + if KEYBOARD_UINPUT_ISSUE: + error_messages.linux_uinput() + if RUNNING_WAYLAND: + error_messages.linux_wayland() AutoSplit() diff --git a/src/AutoSplitImage.py b/src/AutoSplitImage.py index d4176560..3e69a27b 100644 --- a/src/AutoSplitImage.py +++ b/src/AutoSplitImage.py @@ -1,4 +1,5 @@ import os +import tomllib from enum import IntEnum, auto from math import sqrt from typing import TYPE_CHECKING @@ -8,8 +9,20 @@ from cv2.typing import MatLike import error_messages -from compare import check_if_image_has_transparency, get_comparison_method_by_index -from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image +from compare import ( + check_if_image_has_transparency, + extract_and_compare_text, + get_comparison_method_by_index, +) +from utils import ( + BGR_CHANNEL_COUNT, + MAXBYTE, + TESSERACT_PATH, + ColorChannel, + ImageShape, + imread, + is_valid_image, +) 
if TYPE_CHECKING:
    from AutoSplit import AutoSplit
@@ -20,8 +33,8 @@
 COMPARISON_RESIZE_HEIGHT = 240
 COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT)
 COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT
-MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8")
-MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8")
+MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype=np.uint8)
+MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype=np.uint8)
 START_KEYWORD = "start_auto_splitter"
 RESET_KEYWORD = "reset"
@@ -33,20 +46,26 @@ class ImageType(IntEnum):


 class AutoSplitImage:
-    path: str
-    filename: str
-    flags: int
-    loops: int
     image_type: ImageType
     byte_array: MatLike | None = None
     mask: MatLike | None = None
     # This value is internal, check for mask instead
     _has_transparency = False
-    # These values should be overriden by some Defaults if None. Use getters instead
+    # These values should be overridden by some Defaults if None. Use getters instead
     __delay_time: float | None = None
     __comparison_method: int | None = None
     __pause_time: float | None = None
     __similarity_threshold: float | None = None
+    __rect = (0, 0, 1, 1)
+    __fps_limit = 0
+
+    @property
+    def is_ocr(self):
+        """
+        Whether a "split image" is actually for Optical Character Recognition,
+        based on whether there are any text strings to search for.
+        """
+        return bool(self.texts)

     def get_delay_time(self, default: "AutoSplit | int"):
         """Get image's delay time or fallback to the default value from spinbox."""
@@ -80,6 +99,12 @@ def get_similarity_threshold(self, default: "AutoSplit | float"):
             return default
         return default.settings_dict["default_similarity_threshold"]

+    def get_fps_limit(self, default: "AutoSplit"):
+        """Get image's fps limit or fallback to the default value from spinbox."""
+        if self.__fps_limit != 0:
+            return self.__fps_limit
+        return default.settings_dict["fps_limit"]
+
     def __init__(self, path: str):
         self.path = path
         self.filename = os.path.split(path)[-1].lower()
@@ -89,7 +114,12 @@
         self.__comparison_method = comparison_method_from_filename(self.filename)
         self.__pause_time = pause_from_filename(self.filename)
         self.__similarity_threshold = threshold_from_filename(self.filename)
-        self.__read_image_bytes(path)
+        self.texts: list[str] = []
+        self.__ocr_comparison_methods: list[int] = []
+        if path.endswith("txt"):
+            self.__parse_text_file(path)
+        else:
+            self.__read_image_bytes(path)

         if START_KEYWORD in self.filename:
             self.image_type = ImageType.START
@@ -98,8 +128,33 @@
         else:
             self.image_type = ImageType.SPLIT

+    def __parse_text_file(self, path: str):
+        if not TESSERACT_PATH:
+            error_messages.tesseract_missing(path)
+            return
+
+        with open(path, mode="rb") as f:
+            data = tomllib.load(f)
+
+        self.texts = [text.lower().strip() for text in data["texts"]]
+        self.__rect = (data["left"], data["right"], data["top"], data["bottom"])
+        self.__ocr_comparison_methods = data.get("methods", [0])
+        self.__fps_limit = data.get("fps_limit", 0)
+
+        if not self.__validate_ocr():
+            error_messages.wrong_ocr_values(path)
+            return
+
+    def __validate_ocr(self):
+        values = [*self.__rect, *self.__ocr_comparison_methods, self.__fps_limit]
+        return (
+            all(value >= 0 for value in values)  # Check for invalid negative values
+            and self.__rect[1] > self.__rect[0]
+            and self.__rect[3] > self.__rect[2]
+        )
+
     def __read_image_bytes(self, path: str):
-        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
+        image = imread(path, 
cv2.IMREAD_UNCHANGED) if not is_valid_image(image): self.byte_array = None error_messages.image_type(path) @@ -110,9 +165,12 @@ def __read_image_bytes(self, path: str): if self._has_transparency: # Adaptively determine the target size according to # the number of nonzero elements in the alpha channel of the split image. - # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency. + # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency. # noqa: E501 # Which wouldn't incur any performance loss in methods where masked regions are ignored. - scale = min(1, sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha]))) + scale = min( + 1, + sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha])), + ) image = cv2.resize( image, @@ -135,15 +193,31 @@ def __read_image_bytes(self, path: str): def check_flag(self, flag: int): return self.flags & flag == flag - def compare_with_capture( - self, - default: "AutoSplit | int", - capture: MatLike | None, - ): - """Compare image with capture using image's comparison method. Falls back to combobox.""" - if not is_valid_image(self.byte_array) or not is_valid_image(capture): + def compare_with_capture(self, default: "AutoSplit | int", capture: MatLike | None): + """ + Compare image with capture using image's comparison method. Falls back to combobox. + + For OCR text files: + extract image text from rectangle position and compare it with the expected string. + """ + if not is_valid_image(capture): return 0.0 - resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1]) + + if self.is_ocr: + return extract_and_compare_text( + capture[ + self.__rect[2] : self.__rect[3], + self.__rect[0] : self.__rect[1], + ], + self.texts, + self.__ocr_comparison_methods, + ) + + if not is_valid_image(self.byte_array): + return 0.0 + resized_capture = cv2.resize( + capture, self.byte_array.shape[1::-1], interpolation=cv2.INTER_NEAREST + ) return get_comparison_method_by_index( self.__get_comparison_method_index(default), diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py index 29c3ddb7..3b2fa049 100644 --- a/src/capture_method/BitBltCaptureMethod.py +++ b/src/capture_method/BitBltCaptureMethod.py @@ -1,3 +1,7 @@ +import sys + +if sys.platform != "win32": + raise OSError import ctypes import numpy as np @@ -26,19 +30,19 @@ def is_blank(image: MatLike): class BitBltCaptureMethod(CaptureMethodBase): name = "BitBlt" short_description = "fastest, least compatible" - description = ( - "\nThe best option when compatible. But it cannot properly record " - + "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. " - + "\nThe smaller the selected region, the more efficient it is. " - ) + description = """ +The best option when compatible. But it cannot properly record +OpenGL, Hardware Accelerated or Exclusive Fullscreen windows. 
diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py
index d03765f7..73cbb9ad 100644
--- a/src/capture_method/CaptureMethodBase.py
+++ b/src/capture_method/CaptureMethodBase.py
@@ -16,15 +16,14 @@ class CaptureMethodBase:
     _autosplit_ref: "AutoSplit"

     def __init__(self, autosplit: "AutoSplit"):
-        # Some capture methods don't need an initialization process
         self._autosplit_ref = autosplit

     def reinitialize(self):
         self.close()
-        self.__init__(self._autosplit_ref)  # type: ignore[misc]
+        self.__init__(self._autosplit_ref)  # type: ignore[misc] # noqa: PLC2801

     def close(self):
-        # Some capture methods don't need an initialization process
+        # Some capture methods don't need any cleanup
         pass

     def get_frame(self) -> MatLike | None:  # noqa: PLR6301
diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py
index 65ce19ec..e13ff11c 100644
--- a/src/capture_method/DesktopDuplicationCaptureMethod.py
+++ b/src/capture_method/DesktopDuplicationCaptureMethod.py
@@ -1,11 +1,14 @@
-from typing import TYPE_CHECKING, cast
+import sys
+
+if sys.platform != "win32":
+    raise OSError
+from typing import TYPE_CHECKING

 import cv2
 import d3dshot
 import win32api
 import win32con
 import win32gui
-from cv2.typing import MatLike
 from typing_extensions import override

 from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
@@ -18,15 +21,15 @@ class DesktopDuplicationCaptureMethod(BitBltCaptureMethod):
     name = "Direct3D Desktop Duplication"
     short_description = "slower, bound to display"
-    description = (
-        "\nDuplicates the desktop using Direct3D. "
-        + "\nIt can record OpenGL and Hardware Accelerated windows. "
-        + "\nAbout 10-15x slower than BitBlt. Not affected by window size. "
-        + "\nOverlapping windows will show up and can't record across displays. "
-        + "\nThis option may not be available for hybrid GPU laptops, "
-        + "\nsee D3DDD-Note-Laptops.md for a solution. "
-        + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method "
-    )
+    description = f"""
+Duplicates the desktop using Direct3D.
+It can record OpenGL and Hardware Accelerated windows.
+Up to 15x slower than BitBlt for tiny regions. Not affected by window size.
+Limited by the target window and monitor's refresh rate.
+Overlapping windows will show up and can't record across displays.
+This option may not be available for hybrid GPU laptops,
+see D3DDD-Note-Laptops.md for a solution.
+https://www.github.com/{GITHUB_REPOSITORY}#capture-method"""

     def __init__(self, autosplit: "AutoSplit"):
         super().__init__(autosplit)
@@ -43,9 +46,9 @@ def get_frame(self):
         left_bounds, top_bounds, *_ = get_window_bounds(hwnd)
         self.desktop_duplication.display = next(
-            display for display
-            in self.desktop_duplication.displays
-            if display.hmonitor == hmonitor
+            display
+            for display in self.desktop_duplication.displays
+            if display.hmonitor == hmonitor  # fmt: skip
         )
         offset_x, offset_y, *_ = win32gui.GetWindowRect(hwnd)
         offset_x -= self.desktop_duplication.display.position["left"]
@@ -54,10 +57,7 @@ def get_frame(self):
         top = selection["y"] + offset_y + top_bounds
         right = selection["width"] + left
         bottom = selection["height"] + top
-        screenshot = cast(
-            MatLike | None,
-            self.desktop_duplication.screenshot((left, top, right, bottom)),
-        )
+        screenshot = self.desktop_duplication.screenshot((left, top, right, bottom))
         if screenshot is None:
             return None
         return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA)
diff --git a/src/capture_method/ForceFullContentRenderingCaptureMethod.py b/src/capture_method/ForceFullContentRenderingCaptureMethod.py
index ebc4cc40..c028d3a9 100644
--- a/src/capture_method/ForceFullContentRenderingCaptureMethod.py
+++ b/src/capture_method/ForceFullContentRenderingCaptureMethod.py
@@ -1,13 +1,17 @@
+import sys
+
+if sys.platform != "win32":
+    raise OSError
 from capture_method.BitBltCaptureMethod import BitBltCaptureMethod


 class ForceFullContentRenderingCaptureMethod(BitBltCaptureMethod):
     name = "Force Full Content Rendering"
     short_description = "very slow, can affect rendering"
-    description = (
-        "\nUses BitBlt behind the scene, but passes a special flag "
-        + "\nto PrintWindow to force rendering the entire desktop. "
-        + "\nAbout 10-15x slower than BitBlt based on original window size "
-        + "\nand can mess up some applications' rendering pipelines. "
-    )
+    description = """
+Uses BitBlt behind the scenes, but passes a special flag
+to PrintWindow to force rendering the entire desktop.
+About 10-15x slower than BitBlt based on original window size
+and can mess up some applications' rendering pipelines."""
+
     _render_full_content = True
diff --git a/src/capture_method/Screenshot using QT attempt.py b/src/capture_method/Screenshot using QT attempt.py
new file mode 100644
index 00000000..abb3d3af
--- /dev/null
+++ b/src/capture_method/Screenshot using QT attempt.py
@@ -0,0 +1,36 @@
+# ruff: noqa: RET504
+import sys
+
+if sys.platform != "linux":
+    raise OSError
+from typing import cast
+
+import numpy as np
+from cv2.typing import MatLike
+from PySide6.QtCore import QBuffer, QIODeviceBase
+from PySide6.QtGui import QGuiApplication
+from typing_extensions import override
+
+from capture_method.CaptureMethodBase import CaptureMethodBase
+
+
+class QtCaptureMethod(CaptureMethodBase):
+    _render_full_content = False
+
+    @override
+    def get_frame(self):
+        if not self.check_selected_region_exists():
+            return None
+
+        buffer = QBuffer()
+        buffer.open(QIODeviceBase.OpenModeFlag.ReadWrite)
+        winid = self._autosplit_ref.winId()
+        test = QGuiApplication.primaryScreen().grabWindow(winid, 0, 0, 200, 200)
+        image = test.toImage()
+        b = image.bits()
+        # sip.voidptr must know size to support python buffer interface
+        # b.setsize(200 * 200 * 3)
+        frame = np.frombuffer(cast(MatLike, b), np.uint8).reshape((200, 200, 3))
+
+        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        return frame
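The hardcoded 200×200×3 reshape in the Qt attempt above is fragile. A more defensive QImage-to-numpy conversion could look like this (a sketch, not part of the patch; it assumes PySide6, where `constBits()` returns a sized memoryview, and it accounts for QImage's per-scanline alignment padding):

```python
import numpy as np
from PySide6.QtGui import QImage


def qimage_to_ndarray(image: QImage) -> np.ndarray:
    """Convert any QImage to an HxWx4 RGBA array, respecting scanline padding."""
    image = image.convertToFormat(QImage.Format.Format_RGBA8888)
    height, width = image.height(), image.width()
    # bytesPerLine() can exceed width * 4 due to alignment padding
    array = np.frombuffer(image.constBits(), np.uint8).reshape((height, image.bytesPerLine()))
    return array[:, : width * 4].reshape((height, width, 4)).copy()
```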
diff --git a/src/capture_method/ScrotCaptureMethod.py b/src/capture_method/ScrotCaptureMethod.py
new file mode 100644
index 00000000..20ed9375
--- /dev/null
+++ b/src/capture_method/ScrotCaptureMethod.py
@@ -0,0 +1,59 @@
+import sys
+
+if sys.platform != "linux":
+    raise OSError
+
+import cv2
+import numpy as np
+import pyscreeze
+from pywinctl import getWindowsWithTitle
+from typing_extensions import override
+from Xlib.display import Display
+from Xlib.error import BadWindow
+
+from capture_method.CaptureMethodBase import CaptureMethodBase
+from utils import is_valid_image
+
+
+class ScrotCaptureMethod(CaptureMethodBase):
+    name = "Scrot"
+    short_description = "very slow, may leave files"
+    description = (
+        "\nUses Scrot (SCReenshOT) to take screenshots. "
+        + "\nLeaves behind a screenshot file if interrupted. "
+    )
" + ) + + @override + def get_frame(self): + if not self.check_selected_region_exists(): + return None + xdisplay = Display() + root = xdisplay.screen().root + try: + data = root.translate_coords(self._autosplit_ref.hwnd, 0, 0)._data # noqa: SLF001 + except BadWindow: + return None + offset_x = data["x"] + offset_y = data["y"] + selection = self._autosplit_ref.settings_dict["capture_region"] + image = pyscreeze.screenshot( + None, + ( + selection["x"] + offset_x, + selection["y"] + offset_y, + selection["width"], + selection["height"], + ), + ) + image = np.array(image) + if not is_valid_image(image): + return None + return cv2.cvtColor(image, cv2.COLOR_RGB2BGRA) + + @override + def recover_window(self, captured_window_title: str): + windows = getWindowsWithTitle(captured_window_title) + if len(windows) == 0: + return False + self._autosplit_ref.hwnd = windows[0].getHandle() + return self.check_selected_region_exists() diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py index 04d37f1d..a2229f14 100644 --- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py +++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py @@ -5,17 +5,16 @@ import cv2.Error import numpy as np from cv2.typing import MatLike -from pygrabber.dshow_graph import FilterGraph from typing_extensions import override from capture_method.CaptureMethodBase import CaptureMethodBase from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback -from utils import ImageShape, is_valid_image +from utils import ImageShape, get_input_device_resolution, is_valid_image if TYPE_CHECKING: from AutoSplit import AutoSplit -OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128] +OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = (127, 129, 128) def is_blank(image: MatLike): @@ -27,7 +26,7 @@ def is_blank(image: MatLike): :: image.shape[ImageShape.Y] - 1, :: image.shape[ImageShape.X] - 1, ] - == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL, + == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL ) @@ -55,7 +54,8 @@ def __read_loop(self): if not ( cv2_error.code == cv2.Error.STS_ERROR and ( - # Likely means the camera is occupied OR the camera index is out of range (like -1) + # Likely means the camera is occupied + # OR the camera index is out of range (like -1) cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n") # Some capture cards we cannot use directly # https://github.com/opencv/opencv/issues/23539 @@ -83,7 +83,7 @@ def __read_loop(self): "AutoSplit encountered an unhandled exception while " + "trying to grab a frame and has stopped capture. " + CREATE_NEW_ISSUE_MESSAGE, - ), + ) ) def __init__(self, autosplit: "AutoSplit"): @@ -94,20 +94,19 @@ def __init__(self, autosplit: "AutoSplit"): # The video capture device isn't accessible, don't bother with it. if not self.capture_device.isOpened(): + self.close() return - filter_graph = FilterGraph() - filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"]) - width, height = filter_graph.get_input_device().get_current_format() - filter_graph.remove_filters() - # Ensure we're using the right camera size. 
diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py
index 6c387a58..aa905a32 100644
--- a/src/capture_method/WindowsGraphicsCaptureMethod.py
+++ b/src/capture_method/WindowsGraphicsCaptureMethod.py
@@ -1,3 +1,7 @@
+import sys
+
+if sys.platform != "win32":
+    raise OSError
 import asyncio
 from typing import TYPE_CHECKING, cast

@@ -5,36 +9,40 @@
 import win32gui
 from cv2.typing import MatLike
 from typing_extensions import override
-from winsdk.windows.graphics import SizeInt32
-from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession
-from winsdk.windows.graphics.capture.interop import create_for_window
-from winsdk.windows.graphics.directx import DirectXPixelFormat
-from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap
+from winrt.windows.graphics import SizeInt32
+from winrt.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession
+from winrt.windows.graphics.capture.interop import create_for_window
+from winrt.windows.graphics.directx import DirectXPixelFormat
+from winrt.windows.graphics.directx.direct3d11 import IDirect3DSurface
+from winrt.windows.graphics.directx.direct3d11.interop import (
+    create_direct3d11_device_from_dxgi_device,
+)
+from winrt.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap

 from capture_method.CaptureMethodBase import CaptureMethodBase
-from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd
+from d3d11 import D3D11_CREATE_DEVICE_FLAG, D3D_DRIVER_TYPE, D3D11CreateDevice
+from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, is_valid_hwnd

 if TYPE_CHECKING:
     from AutoSplit import AutoSplit

 WGC_NO_BORDER_MIN_BUILD = 20348
-LEARNING_MODE_DEVICE_BUILD = 17763
-"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice"""
+
+
+async def convert_d3d_surface_to_software_bitmap(surface: IDirect3DSurface | None):
+    return await SoftwareBitmap.create_copy_from_surface_async(surface)


 class WindowsGraphicsCaptureMethod(CaptureMethodBase):
     name = "Windows Graphics Capture"
     short_description = "fast, most compatible, capped at 60fps"
-    description = (
-        f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. "
-        + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}"
-        + "\nrequire having at least one audio or video Capture Device connected and enabled."
-        + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. "
-        + "\nAdds a yellow border on Windows 10 (not on Windows 11)."
-        + "\nCaps at around 60 FPS. "
-    )
-
-    size: SizeInt32
+    description = f"""
+Only available in Windows 10.0.{WGC_MIN_BUILD} and up.
+Allows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows.
+Adds a yellow border on Windows 10 (not on Windows 11).
+Caps at around 60 FPS."""
+
+    size: "SizeInt32"
     frame_pool: Direct3D11CaptureFramePool | None = None
     session: GraphicsCaptureSession | None = None
     """This is stored to prevent session from being garbage collected"""
@@ -45,11 +53,16 @@ def __init__(self, autosplit: "AutoSplit"):
         if not is_valid_hwnd(autosplit.hwnd):
             return

+        dxgi, *_ = D3D11CreateDevice(
+            DriverType=D3D_DRIVER_TYPE.HARDWARE,
+            Flags=D3D11_CREATE_DEVICE_FLAG.BGRA_SUPPORT,
+        )
+        direct3d_device = create_direct3d11_device_from_dxgi_device(dxgi.value)
         item = create_for_window(autosplit.hwnd)
         frame_pool = Direct3D11CaptureFramePool.create_free_threaded(
-            get_direct3d_device(),
+            direct3d_device,
             DirectXPixelFormat.B8_G8_R8_A8_UINT_NORMALIZED,
-            1,
+            1,  # number_of_buffers
             item.size,
         )
         if not frame_pool:
@@ -68,6 +81,7 @@ def __init__(self, autosplit: "AutoSplit"):

     @override
     def close(self):
+        super().close()
         if self.frame_pool:
             self.frame_pool.close()
             self.frame_pool = None
@@ -75,8 +89,8 @@ def close(self):
             try:
                 self.session.close()
             except OSError:
-                # OSError: The application called an interface that was marshalled for a different thread
-                # This still seems to close the session and prevent the following hard crash in LiveSplit
+                # OSError: The application called an interface that was marshalled for a different thread  # noqa: E501
+                # This still seems to close the session and prevent the following hard crash in LiveSplit  # noqa: E501
                 # "AutoSplit.exe <process id> is not responding"  # noqa: E501
                 pass
             self.session = None
@@ -99,14 +113,13 @@ def get_frame(self) -> MatLike | None:
             return None

         # We were too fast and the next frame wasn't ready yet
+        # TODO: Consider "add_frame_arrived" instead!
+        # https://github.com/pywinrt/pywinrt/blob/5bf1ac5ff4a77cf343e11d7c841c368fa9235d81/samples/screen_capture/__main__.py#L67-L78
         if not frame:
             return self.last_converted_frame

-        async def coroutine():
-            return await SoftwareBitmap.create_copy_from_surface_async(frame.surface)
-
         try:
-            software_bitmap = asyncio.run(coroutine())
+            software_bitmap = asyncio.run(convert_d3d_surface_to_software_bitmap(frame.surface))
         except SystemError as exception:
             # HACK: can happen when closing the GraphicsCapturePicker
             if str(exception).endswith("returned a result with an error set"):
@@ -149,7 +162,7 @@ def recover_window(self, captured_window_title: str):

     @override
     def check_selected_region_exists(self):
         return bool(
-            is_valid_hwnd(self._autosplit_ref.hwnd)
+            is_valid_hwnd(self._autosplit_ref.hwnd)  # fmt: skip
             and self.frame_pool
-            and self.session,
+            and self.session
         )
diff --git a/src/capture_method/XcbCaptureMethod.py b/src/capture_method/XcbCaptureMethod.py
new file mode 100644
index 00000000..f91f1f8b
--- /dev/null
+++ b/src/capture_method/XcbCaptureMethod.py
@@ -0,0 +1,69 @@
+import sys
+
+if sys.platform != "linux":
+    raise OSError
+
+import cv2
+import numpy as np
+from PIL import ImageGrab
+from pywinctl import getWindowsWithTitle
+from typing_extensions import override
+from Xlib.display import Display
+from Xlib.error import BadWindow
+
+from capture_method.CaptureMethodBase import CaptureMethodBase
+from utils import is_valid_image
+
+
+class XcbCaptureMethod(CaptureMethodBase):
+    name = "X11 XCB"
+    short_description = "fast, requires XCB"
+    description = "\nUses the XCB library to take screenshots of the X11 server."
+
+    _xdisplay: str | None = ""  # ":0"
+
+    @override
+    def get_frame(self):
+        if not self.check_selected_region_exists():
+            return None
+        xdisplay = Display()
+        root = xdisplay.screen().root
+        try:
+            data = root.translate_coords(self._autosplit_ref.hwnd, 0, 0)._data  # noqa: SLF001
+        except BadWindow:
+            return None
+        offset_x = data["x"]
+        offset_y = data["y"]
+        # image = window.get_image(
+        #     selection["x"],
+        #     selection["y"],
+        #     selection["width"],
+        #     selection["height"],
+        #     1,
+        #     0,
+        # )
+
+        selection = self._autosplit_ref.settings_dict["capture_region"]
+        x = selection["x"] + offset_x
+        y = selection["y"] + offset_y
+        image = ImageGrab.grab(
+            (
+                x,
+                y,
+                x + selection["width"],
+                y + selection["height"],
+            ),
+            xdisplay=self._xdisplay,
+        )
+        image = np.array(image)
+        if not is_valid_image(image):
+            return None
+        return cv2.cvtColor(image, cv2.COLOR_RGB2BGRA)
+
+    @override
+    def recover_window(self, captured_window_title: str):
+        windows = getWindowsWithTitle(captured_window_title)
+        if len(windows) == 0:
+            return False
+        self._autosplit_ref.hwnd = windows[0].getHandle()
+        return self.check_selected_region_exists()
diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py
index de7e2581..a494e0f5 100644
--- a/src/capture_method/__init__.py
+++ b/src/capture_method/__init__.py
@@ -1,21 +1,36 @@
-import asyncio
+import os
+import sys
 from collections import OrderedDict
 from dataclasses import dataclass
-from enum import Enum, EnumMeta, auto, unique
+from enum import EnumMeta, StrEnum, auto, unique
 from itertools import starmap
-from typing import TYPE_CHECKING, NoReturn, TypedDict, cast
+from typing import TYPE_CHECKING, Never, TypedDict, cast

-from _ctypes import COMError
-from pygrabber.dshow_graph import FilterGraph
-from typing_extensions import Never, override
+from typing_extensions import override

-from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
 from capture_method.CaptureMethodBase import CaptureMethodBase
-from capture_method.DesktopDuplicationCaptureMethod import DesktopDuplicationCaptureMethod
-from capture_method.ForceFullContentRenderingCaptureMethod import ForceFullContentRenderingCaptureMethod
 from capture_method.VideoCaptureDeviceCaptureMethod import VideoCaptureDeviceCaptureMethod
-from capture_method.WindowsGraphicsCaptureMethod import WindowsGraphicsCaptureMethod
-from utils import WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, first, try_get_direct3d_device
+from utils import WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, first, get_input_device_resolution
+
+if sys.platform == "win32":
+    from _ctypes import COMError  # noqa: PLC2701 # comtypes is untyped
+
+    from pygrabber.dshow_graph import FilterGraph
+
+    from capture_method.BitBltCaptureMethod import BitBltCaptureMethod
+    from capture_method.DesktopDuplicationCaptureMethod import DesktopDuplicationCaptureMethod
+    from capture_method.ForceFullContentRenderingCaptureMethod import (
+        ForceFullContentRenderingCaptureMethod,
+    )
+    from capture_method.WindowsGraphicsCaptureMethod import WindowsGraphicsCaptureMethod
+
+if sys.platform == "linux":
+    import pyscreeze
+    from PIL import UnidentifiedImageError, features
+
+    from capture_method.ScrotCaptureMethod import ScrotCaptureMethod
+    from capture_method.XcbCaptureMethod import XcbCaptureMethod

 if TYPE_CHECKING:
     from AutoSplit import AutoSplit
@@ -28,42 +43,28 @@ class Region(TypedDict):
     height: int


-class CaptureMethodMeta(EnumMeta):
+class ContainerEnumMeta(EnumMeta):
     # Allow checking if simple string is enum
     @override
-    def __contains__(self, other: object):
+    def __contains__(cls, other: object):
         try:
-            self(other)
+            cls(other)
         except ValueError:
             return False
         return True


 @unique
-# TODO: Try StrEnum in Python 3.11
-class CaptureMethodEnum(Enum, metaclass=CaptureMethodMeta):
-    # Allow TOML to save as a simple string
-    @override
-    def __repr__(self):
-        return self.value
-
-    # Allow direct comparison with strings
-    @override
-    def __eq__(self, other: object):
-        if isinstance(other, str):
-            return self.value == other
-        if isinstance(other, Enum):
-            return self.value == other.value
-        return other == self
-
-    # Restore hashing functionality for use in Maps
-    @override
-    def __hash__(self):
-        return self.value.__hash__()
-
+class CaptureMethodEnum(StrEnum, metaclass=ContainerEnumMeta):
+    # Capitalize the string value from auto()
     @override
     @staticmethod
-    def _generate_next_value_(name: "str | CaptureMethodEnum", *_):
+    def _generate_next_value_(
+        name: str,
+        start: int,
+        count: int,
+        last_values: list[str],
+    ) -> str:
         return name

     NONE = ""
@@ -71,6 +72,8 @@ def _generate_next_value_(name: "str | CaptureMethodEnum", *_):
     WINDOWS_GRAPHICS_CAPTURE = auto()
     PRINTWINDOW_RENDERFULLCONTENT = auto()
     DESKTOP_DUPLICATION = auto()
+    SCROT = auto()
+    XCB = auto()
     VIDEO_CAPTURE_DEVICE = auto()

@@ -97,15 +100,15 @@ def get_method_by_index(self, index: int):
     # Disallow unsafe get w/o breaking it at runtime
     @override
     def __getitem__(  # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
-        self,
-        __key: Never,
-    ) -> NoReturn | type[CaptureMethodBase]:
-        return super().__getitem__(__key)
+        self, key: Never, /
+    ) -> type[CaptureMethodBase]:
+        return super().__getitem__(key)

     @override
-    def get(self, key: CaptureMethodEnum, __default: object = None):
+    def get(self, key: CaptureMethodEnum, default: object = None, /):
         """
-        Returns the `CaptureMethodBase` subclass for `CaptureMethodEnum` if `CaptureMethodEnum` is available,
+        Returns the `CaptureMethodBase` subclass for `CaptureMethodEnum`
+        if `CaptureMethodEnum` is available,
         else defaults to the first available `CaptureMethodEnum`.
         Returns `CaptureMethodBase` directly if there's no capture methods.
         """
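Taken together, the two enum customizations above behave roughly as follows (a sketch using names from this patch): overriding `_generate_next_value_` keeps the member name as the value, unlike `StrEnum`'s default lowercasing, and `ContainerEnumMeta` lets plain strings loaded from a TOML settings file be membership-tested directly:

```python
assert CaptureMethodEnum.BITBLT.value == "BITBLT"  # default StrEnum auto() would give "bitblt"
assert "BITBLT" in CaptureMethodEnum  # via ContainerEnumMeta.__contains__
assert "not a capture method" not in CaptureMethodEnum
```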
""" @@ -115,40 +118,48 @@ def get(self, key: CaptureMethodEnum, __default: object = None): CAPTURE_METHODS = CaptureMethodDict() -if ( # Windows Graphics Capture requires a minimum Windows Build - WINDOWS_BUILD_NUMBER >= WGC_MIN_BUILD - # Our current implementation of Windows Graphics Capture does not ensure we can get an ID3DDevice - and try_get_direct3d_device() -): - CAPTURE_METHODS[CaptureMethodEnum.WINDOWS_GRAPHICS_CAPTURE] = WindowsGraphicsCaptureMethod -CAPTURE_METHODS[CaptureMethodEnum.BITBLT] = BitBltCaptureMethod -try: # Test for laptop cross-GPU Desktop Duplication issue - import d3dshot - - d3dshot.create(capture_output="numpy") -except (ModuleNotFoundError, COMError): - pass -else: - CAPTURE_METHODS[CaptureMethodEnum.DESKTOP_DUPLICATION] = DesktopDuplicationCaptureMethod -CAPTURE_METHODS[CaptureMethodEnum.PRINTWINDOW_RENDERFULLCONTENT] = ForceFullContentRenderingCaptureMethod +if sys.platform == "win32": + # Windows Graphics Capture requires a minimum Windows Build + if WINDOWS_BUILD_NUMBER >= WGC_MIN_BUILD: + CAPTURE_METHODS[CaptureMethodEnum.WINDOWS_GRAPHICS_CAPTURE] = WindowsGraphicsCaptureMethod + CAPTURE_METHODS[CaptureMethodEnum.BITBLT] = BitBltCaptureMethod + try: # Test for laptop cross-GPU Desktop Duplication issue + import d3dshot + + d3dshot.create(capture_output="numpy") + except (ModuleNotFoundError, COMError): + pass + else: + CAPTURE_METHODS[CaptureMethodEnum.DESKTOP_DUPLICATION] = DesktopDuplicationCaptureMethod + CAPTURE_METHODS[CaptureMethodEnum.PRINTWINDOW_RENDERFULLCONTENT] = ( + ForceFullContentRenderingCaptureMethod + ) +elif sys.platform == "linux": + if features.check_feature(feature="xcb"): + CAPTURE_METHODS[CaptureMethodEnum.XCB] = XcbCaptureMethod + try: + pyscreeze.screenshot() + except UnidentifiedImageError: + pass + else: + # TODO: Investigate solution for Slow Scrot: + # https://github.com/asweigart/pyscreeze/issues/68 + CAPTURE_METHODS[CaptureMethodEnum.SCROT] = ScrotCaptureMethod CAPTURE_METHODS[CaptureMethodEnum.VIDEO_CAPTURE_DEVICE] = VideoCaptureDeviceCaptureMethod def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: "AutoSplit"): """ - Seemlessly change the current capture method, - initialize the new one with transfered subscriptions + Seamlessly change the current capture method, + initialize the new one with transferred subscriptions and update UI as needed. """ autosplit.capture_method.close() autosplit.capture_method = CAPTURE_METHODS.get(selected_capture_method)(autosplit) - if selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: - autosplit.select_region_button.setDisabled(True) - autosplit.select_window_button.setDisabled(True) - else: - autosplit.select_region_button.setDisabled(False) - autosplit.select_window_button.setDisabled(False) + disable_selection_buttons = selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE + autosplit.select_region_button.setDisabled(disable_selection_buttons) + autosplit.select_window_button.setDisabled(disable_selection_buttons) @dataclass @@ -160,24 +171,25 @@ class CameraInfo: resolution: tuple[int, int] -def get_input_device_resolution(index: int): - filter_graph = FilterGraph() - try: - filter_graph.add_video_input_device(index) - # This can happen with virtual cameras throwing errors. 
+def get_input_devices():
+    if sys.platform == "win32":
+        return FilterGraph().get_input_devices()
+
+    cameras: list[str] = []
+    if sys.platform == "linux":
+        try:
+            for index in range(len(os.listdir("/sys/class/video4linux"))):
+                with open(f"/sys/class/video4linux/video{index}/name", encoding="utf-8") as file:
+                    cameras.append(file.readline()[:-2])
+        except FileNotFoundError:
+            pass
+    return cameras


-async def get_all_video_capture_devices():
-    named_video_inputs = FilterGraph().get_input_devices()
+def get_all_video_capture_devices():
+    named_video_inputs = get_input_devices()

-    async def get_camera_info(index: int, device_name: str):
+    def get_camera_info(index: int, device_name: str):
         backend = ""
         # Probing freezes some devices (like GV-USB2 and AverMedia) if already in use. See #169
         # FIXME: Maybe offer the option to the user to obtain more info about their devices?
@@ -204,9 +216,4 @@ async def get_camera_info(index: int, device_name: str):
             else None
         )

-    return [
-        camera_info
-        for camera_info
-        in await asyncio.gather(*starmap(get_camera_info, enumerate(named_video_inputs)))
-        if camera_info is not None
-    ]
+    return list(filter(None, starmap(get_camera_info, enumerate(named_video_inputs))))
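The `list(filter(None, starmap(...)))` replacement above relies on `filter` with a `None` predicate keeping only truthy items, so `get_camera_info` returning `None` silently drops the device. A standalone illustration (hypothetical names):

```python
from itertools import starmap


def get_info(index: int, name: str):
    return (index, name) if name else None  # None marks an unusable device


devices = ["USB Camera", "", "OBS Virtual Camera"]
assert list(filter(None, starmap(get_info, enumerate(devices)))) == [
    (0, "USB Camera"),
    (2, "OBS Virtual Camera"),
]
```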
diff --git a/src/compare.py b/src/compare.py
index 6e3bd990..868293cd 100644
--- a/src/compare.py
+++ b/src/compare.py
@@ -1,17 +1,27 @@
+from collections.abc import Iterable
 from math import sqrt

 import cv2
-import imagehash
+import Levenshtein
+import numpy as np
 from cv2.typing import MatLike
-from PIL import Image
+from scipy import fft

-from utils import BGRA_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image
+from utils import (
+    BGRA_CHANNEL_COUNT,
+    MAXBYTE,
+    ColorChannel,
+    ImageShape,
+    is_valid_image,
+    run_tesseract,
+)

 MAXRANGE = MAXBYTE + 1
-CHANNELS = [ColorChannel.Red.value, ColorChannel.Green.value, ColorChannel.Blue.value]
-HISTOGRAM_SIZE = [8, 8, 8]
-RANGES = [0, MAXRANGE, 0, MAXRANGE, 0, MAXRANGE]
+CHANNELS = (ColorChannel.Red.value, ColorChannel.Green.value, ColorChannel.Blue.value)
+HISTOGRAM_SIZE = (8, 8, 8)
+RANGES = (0, MAXRANGE, 0, MAXRANGE, 0, MAXRANGE)
 MASK_SIZE_MULTIPLIER = ColorChannel.Alpha * MAXBYTE * MAXBYTE
+MAX_VALUE = 1.0


 def compare_histograms(source: MatLike, capture: MatLike, mask: MatLike | None = None):
@@ -72,7 +82,7 @@ def compare_template(source: MatLike, capture: MatLike, mask: MatLike | None = N
     # matchTemplate returns the sum of square differences, this is the max
     # that the value can be. Used for normalizing from 0 to 1.
     max_error = (
-        source.size * MAXBYTE * MAXBYTE
+        source.size * MAXBYTE * MAXBYTE  # fmt: skip
         if not is_valid_image(mask)
         else cv2.countNonZero(mask)
     )
@@ -80,6 +90,29 @@ def compare_template(source: MatLike, capture: MatLike, mask: MatLike | None = N

     return 1 - (min_val / max_error)


+def __cv2_phash(image: MatLike, hash_size: int = 8, highfreq_factor: int = 4):
+    """Implementation copied from https://github.com/JohannesBuchner/imagehash/blob/38005924fe9be17cfed145bbc6d83b09ef8be025/imagehash/__init__.py#L260 ."""  # noqa: E501
+    # OpenCV has its own pHash comparison implementation in `cv2.img_hash`,
+    # but it requires contrib/extra modules and is inaccurate
+    # unless we precompute the size with a specific interpolation.
+    # See: https://github.com/opencv/opencv_contrib/issues/3295#issuecomment-1172878684
+    #
+    # pHash = cv2.img_hash.PHash.create()
+    # source = cv2.resize(source, (8, 8), interpolation=cv2.INTER_AREA)
+    # capture = cv2.resize(capture, (8, 8), interpolation=cv2.INTER_AREA)
+    # source_hash = pHash.compute(source)
+    # capture_hash = pHash.compute(capture)
+    # hash_diff = pHash.compare(source_hash, capture_hash)
+
+    img_size = hash_size * highfreq_factor
+    image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
+    image = cv2.resize(image, (img_size, img_size), interpolation=cv2.INTER_AREA)
+    dct = fft.dct(fft.dct(image, axis=0), axis=1)
+    dct_low_frequency = dct[:hash_size, :hash_size]
+    median = np.median(dct_low_frequency)
+    return dct_low_frequency > median
+
+
 def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None):
     """
     Compares the Perceptual Hash of the two given images and returns the similarity between the two.
@@ -89,21 +122,64 @@ def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None
     @param mask: An image matching the dimensions of the source, but 1 channel grayscale
     @return: The similarity between the hashes of the image as a number 0 to 1.
     """
-    # Since imagehash doesn't have any masking itself, bitwise_and will allow us
-    # to apply the mask to the source and capture before calculating the pHash for
-    # each of the images. As a result of this, this function is not going to be very
-    # helpful for large masks as the images when shrinked down to 8x8 will mostly be
-    # the same
+    # Apply the mask to the source and capture before calculating the
+    # pHash for each of the images. As a result of this, this function
+    # is not going to be very helpful for large masks as the images
+    # when shrunk down to 8x8 will mostly be the same.
     if is_valid_image(mask):
         source = cv2.bitwise_and(source, source, mask=mask)
         capture = cv2.bitwise_and(capture, capture, mask=mask)

-    source_hash = imagehash.phash(Image.fromarray(source))
-    capture_hash = imagehash.phash(Image.fromarray(capture))
-    hash_diff = source_hash - capture_hash
+    source_hash = __cv2_phash(source)
+    capture_hash = __cv2_phash(capture)
+    hash_diff = np.count_nonzero(source_hash != capture_hash)

     return 1 - (hash_diff / 64.0)
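For intuition on the new return value of `compare_phash`: each hash is an 8×8 boolean matrix, so two hashes can differ in 0 to 64 positions, and the similarity is `1 - hamming_distance / 64`. A worked example (illustrative only):

```python
import numpy as np

rng = np.random.default_rng(0)
source_hash = rng.random((8, 8)) > 0.5
capture_hash = source_hash.copy()
capture_hash[0, :4] ^= True  # flip 4 of the 64 bits
hash_diff = np.count_nonzero(source_hash != capture_hash)
assert 1 - (hash_diff / 64.0) == 0.9375
```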
+def extract_and_compare_text(capture: MatLike, texts: Iterable[str], methods_index: Iterable[int]):
+    """
+    Compares the extracted text of the given image and returns the similarity between the two texts.
+    The best match of all texts and methods is returned.
+
+    @param capture: Image of any given shape as a numpy array
+    @param texts: a list of strings to match for
+    @param methods_index: a list of comparison methods to use in order
+    @return: The similarity between the text in the image and the text supplied as a number 0 to 1.
+    """
+    methods = [get_ocr_comparison_method_by_index(i) for i in methods_index]
+    png = np.array(cv2.imencode(".png", capture)[1]).tobytes()
+    # Especially with stylised characters, OCR could conceivably get the right
+    # letter, but mix up the casing (m/M, o/O, t/T, etc.)
+    image_string = run_tesseract(png).lower().strip()
+
+    ratio = 0.0
+    for text in texts:
+        for method in methods:
+            ratio = max(ratio, method(text, image_string))
+            if ratio == MAX_VALUE:
+                return ratio  # we found the best match; try to return early
+    return ratio
+
+
+def compare_submatch(a: str, b: str):
+    return float(a in b)
+
+
+def __compare_dummy(*_: object):
+    return 0.0
+
+
+def get_ocr_comparison_method_by_index(comparison_method_index: int):
+    match comparison_method_index:
+        case 0:
+            return Levenshtein.ratio
+        case 1:
+            return compare_submatch
+        case _:
+            return __compare_dummy
+
+
 def get_comparison_method_by_index(comparison_method_index: int):
     match comparison_method_index:
         case 0:
@@ -116,12 +192,9 @@ def get_comparison_method_by_index(comparison_method_index: int):
             return __compare_dummy


-def __compare_dummy(*_: object):
-    return 0.0
-
-
 def check_if_image_has_transparency(image: MatLike):
-    # Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255)
+    # Check if there's a transparency channel (4th channel)
+    # and if at least one pixel is transparent (< 255)
     if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT:
         return False
     mean: float = image[:, :, ColorChannel.Alpha].mean()
@@ -129,6 +202,7 @@ def check_if_image_has_transparency(image: MatLike):
         # Non-transparent images code path is usually faster and simpler, so let's return that
         return False
     # TODO: error message if all pixels are transparent
-    # (the image appears as all black in windows, so it's not obvious for the user what they did wrong)
+    # (the image appears as all black in windows,
+    # so it's not obvious for the user what they did wrong)

     return mean != MAXBYTE
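Usage sketch for the two OCR comparison methods wired up above — method 0 is fuzzy matching via `Levenshtein.ratio`, method 1 is a plain substring test returning 0.0 or 1.0 (the strings here are invented):

```python
import Levenshtein

# Method 0: one substitution in six characters still scores high
assert Levenshtein.ratio("paused", "pavsed") > 0.8
# Method 1: the expected text only needs to appear somewhere in the OCR output
assert compare_submatch("chapter", "chapter 2 complete") == 1.0
assert compare_submatch("chapter", "main menu") == 0.0
```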
diff --git a/src/d3d11.py b/src/d3d11.py
new file mode 100644
index 00000000..b27b5295
--- /dev/null
+++ b/src/d3d11.py
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: MIT
+# Copyright (c) 2024 David Lechner
+import sys
+
+if sys.platform != "win32":
+    raise OSError
+
+import ctypes
+import enum
+import uuid
+from ctypes import wintypes
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ctypes import _FuncPointer  # pyright: ignore[reportPrivateUsage]
+
+
+###
+# https://github.com/pywinrt/pywinrt/blob/main/samples/screen_capture/iunknown.py
+###
+
+
+class GUID(ctypes.Structure):
+    _fields_ = (
+        ("Data1", ctypes.c_ulong),
+        ("Data2", ctypes.c_ushort),
+        ("Data3", ctypes.c_ushort),
+        ("Data4", ctypes.c_ubyte * 8),
+    )
+
+
+class IUnknown(ctypes.c_void_p):
+    QueryInterface = ctypes.WINFUNCTYPE(
+        # _CData is incompatible with int
+        int,  # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
+        ctypes.POINTER(GUID),
+        ctypes.POINTER(wintypes.LPVOID),
+    )(0, "QueryInterface")
+    AddRef = ctypes.WINFUNCTYPE(wintypes.ULONG)(1, "AddRef")
+    Release = ctypes.WINFUNCTYPE(wintypes.ULONG)(2, "Release")
+
+    def query_interface(self, iid: uuid.UUID | str) -> "IUnknown":
+        if isinstance(iid, str):
+            iid = uuid.UUID(iid)
+
+        ppv = wintypes.LPVOID()
+        riid = GUID.from_buffer_copy(iid.bytes_le)
+        ret = self.QueryInterface(self, ctypes.byref(riid), ctypes.byref(ppv))
+
+        if ret:
+            raise ctypes.WinError(ret)
+
+        return IUnknown(ppv.value)
+
+    def __del__(self):
+        IUnknown.Release(self)
+
+
+###
+# https://github.com/pywinrt/pywinrt/blob/main/samples/screen_capture/d3d11.py
+###
+
+
+__all__ = [
+    "D3D11_CREATE_DEVICE_FLAG",
+    "D3D_DRIVER_TYPE",
+    "D3D_FEATURE_LEVEL",
+    "D3D11CreateDevice",
+]
+
+IN = 1
+OUT = 2
+
+# https://learn.microsoft.com/en-us/windows/win32/api/d3dcommon/ne-d3dcommon-d3d_driver_type
+#
+# typedef enum D3D_DRIVER_TYPE {
+#   D3D_DRIVER_TYPE_UNKNOWN = 0,
+#   D3D_DRIVER_TYPE_HARDWARE,
+#   D3D_DRIVER_TYPE_REFERENCE,
+#   D3D_DRIVER_TYPE_NULL,
+#   D3D_DRIVER_TYPE_SOFTWARE,
+#   D3D_DRIVER_TYPE_WARP
+# } ;
+
+
+class D3D_DRIVER_TYPE(enum.IntEnum):
+    UNKNOWN = 0
+    HARDWARE = 1
+    REFERENCE = 2
+    NULL = 3
+    SOFTWARE = 4
+    WARP = 5
+
+
+# https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_create_device_flag
+#
+# typedef enum D3D11_CREATE_DEVICE_FLAG {
+#   D3D11_CREATE_DEVICE_SINGLETHREADED = 0x1,
+#   D3D11_CREATE_DEVICE_DEBUG = 0x2,
+#   D3D11_CREATE_DEVICE_SWITCH_TO_REF = 0x4,
+#   D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS = 0x8,
+#   D3D11_CREATE_DEVICE_BGRA_SUPPORT = 0x20,
+#   D3D11_CREATE_DEVICE_DEBUGGABLE = 0x40,
+#   D3D11_CREATE_DEVICE_PREVENT_ALTERING_LAYER_SETTINGS_FROM_REGISTRY = 0x80,
+#   D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT = 0x100,
+#   D3D11_CREATE_DEVICE_VIDEO_SUPPORT = 0x800
+# } ;
+
+
+class D3D11_CREATE_DEVICE_FLAG(enum.IntFlag):
+    SINGLETHREADED = 0x1
+    DEBUG = 0x2
+    SWITCH_TO_REF = 0x4
+    PREVENT_INTERNAL_THREADING_OPTIMIZATIONS = 0x8
+    BGRA_SUPPORT = 0x20
+    DEBUGGABLE = 0x40
+    PREVENT_ALTERING_LAYER_SETTINGS_FROM_REGISTRY = 0x80
+    DISABLE_GPU_TIMEOUT = 0x100
+    VIDEO_SUPPORT = 0x800
+
+
+# https://learn.microsoft.com/en-us/windows/win32/api/d3dcommon/ne-d3dcommon-d3d_feature_level
+#
+# typedef enum D3D_FEATURE_LEVEL {
+#   D3D_FEATURE_LEVEL_1_0_GENERIC,
+#   D3D_FEATURE_LEVEL_1_0_CORE,
+#   D3D_FEATURE_LEVEL_9_1,
+#   D3D_FEATURE_LEVEL_9_2,
+#   D3D_FEATURE_LEVEL_9_3,
+#   D3D_FEATURE_LEVEL_10_0,
+#   D3D_FEATURE_LEVEL_10_1,
+#   D3D_FEATURE_LEVEL_11_0,
+#   D3D_FEATURE_LEVEL_11_1,
+#   D3D_FEATURE_LEVEL_12_0,
+#   D3D_FEATURE_LEVEL_12_1,
+#   D3D_FEATURE_LEVEL_12_2
+# } ;
+
+
+class D3D_FEATURE_LEVEL(enum.IntEnum):
+    LEVEL_1_0_GENERIC = 0x1000
+    LEVEL_1_0_CORE = 0x1001
+    LEVEL_9_1 = 0x9100
+    LEVEL_9_2 = 0x9200
+    LEVEL_9_3 = 0x9300
+    LEVEL_10_0 = 0xA000
+    LEVEL_10_1 = 0xA100
+    LEVEL_11_0 = 0xB000
+    LEVEL_11_1 = 0xB100
+    LEVEL_12_0 = 0xC000
+    LEVEL_12_1 = 0xC100
+    LEVEL_12_2 = 0xC200
+
+
+# not sure where this is officially defined or if the value would ever change
+D3D11_SDK_VERSION = 7
+
+# https://learn.microsoft.com/en-us/windows/win32/api/d3d11/nf-d3d11-d3d11createdevice
+#
+# HRESULT D3D11CreateDevice(
+#   [in, optional]  IDXGIAdapter            *pAdapter,
+#                   D3D_DRIVER_TYPE         DriverType,
+#                   HMODULE                 Software,
+#                   UINT                    Flags,
+#   [in, optional]  const D3D_FEATURE_LEVEL *pFeatureLevels,
+#                   UINT                    FeatureLevels,
+#                   UINT                    SDKVersion,
+#   [out, optional] ID3D11Device            **ppDevice,
+#   [out, optional] D3D_FEATURE_LEVEL       *pFeatureLevel,
+#   [out, optional] ID3D11DeviceContext     **ppImmediateContext
+# );
+
+
+def errcheck(
+    result: int,
+    _func: "_FuncPointer",  # Actually WinFunctionType but that's an internal class
+    args: tuple[
+        IUnknown | None,  # IDXGIAdapter
+        D3D_DRIVER_TYPE,
+        wintypes.HMODULE | None,
+        D3D11_CREATE_DEVICE_FLAG,
+        D3D_FEATURE_LEVEL | None,
+        int,
+        int,
+        IUnknown,  # ID3D11Device
+        wintypes.UINT,
+        IUnknown,  # ID3D11DeviceContext
+    ],
+):
+    if result:
+        raise ctypes.WinError(result)
+
+    return (args[7], D3D_FEATURE_LEVEL(args[8].value), args[9])
+
+
+D3D11CreateDevice = ctypes.WINFUNCTYPE(
+    # _CData is incompatible with int
+    int,  # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
+    wintypes.LPVOID,
+    wintypes.UINT,
+    wintypes.LPVOID,
+    wintypes.UINT,
+    ctypes.POINTER(wintypes.UINT),
+    wintypes.UINT,
+    wintypes.UINT,
+    ctypes.POINTER(IUnknown),
+    ctypes.POINTER(wintypes.UINT),
+    ctypes.POINTER(IUnknown),
+)(
+    ("D3D11CreateDevice", ctypes.windll.d3d11),
+    (
+        (IN, "pAdapter", None),
+        (IN, "DriverType", D3D_DRIVER_TYPE.UNKNOWN),
+        (IN, "Software", None),
+        (IN, "Flags", 0),
+        (IN, "pFeatureLevels", None),
+        (IN, "FeatureLevels", 0),
+        (IN, "SDKVersion", D3D11_SDK_VERSION),
+        (OUT, "ppDevice"),
+        (OUT, "pFeatureLevel"),
+        (OUT, "ppImmediateContext"),
+    ),
+)
+# _CData is incompatible with int
+D3D11CreateDevice.errcheck = errcheck  # type: ignore[assignment] # pyright: ignore[reportAttributeAccessIssue]
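Thanks to the `errcheck` hook, the `D3D11CreateDevice` prototype above raises `ctypes.WinError` on a failing HRESULT and otherwise returns the three out-parameters as a tuple. Its call site earlier in this patch (`WindowsGraphicsCaptureMethod.__init__`) boils down to this sketch:

```python
# paramflags let us call by parameter name; the return value is
# (ID3D11Device, D3D_FEATURE_LEVEL, ID3D11DeviceContext)
device, feature_level, context = D3D11CreateDevice(
    DriverType=D3D_DRIVER_TYPE.HARDWARE,
    Flags=D3D11_CREATE_DEVICE_FLAG.BGRA_SUPPORT,
)
```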
diff --git a/src/error_messages.py b/src/error_messages.py
index c455f265..5dac82ca 100644
--- a/src/error_messages.py
+++ b/src/error_messages.py
@@ -21,7 +21,12 @@ def __exit_program():
     sys.exit(1)


-def set_text_message(message: str, details: str = "", kill_button: str = "", accept_button: str = ""):
+def set_text_message(
+    message: str,
+    details: str = "",
+    kill_button: str = "",
+    accept_button: str = "",
+):
     message_box = QtWidgets.QMessageBox()
     message_box.setWindowTitle("Error")
     message_box.setTextFormat(QtCore.Qt.TextFormat.RichText)
@@ -30,7 +35,10 @@ def set_text_message(message: str, details: str = "", kill_button: str = "", acc
     if accept_button:
         message_box.addButton(accept_button, QtWidgets.QMessageBox.ButtonRole.AcceptRole)
     if kill_button:
-        force_quit_button = message_box.addButton(kill_button, QtWidgets.QMessageBox.ButtonRole.ResetRole)
+        force_quit_button = message_box.addButton(
+            kill_button,
+            QtWidgets.QMessageBox.ButtonRole.ResetRole,
+        )
         force_quit_button.clicked.connect(__exit_program)
     if details:
         message_box.setDetailedText(details)
@@ -43,11 +51,11 @@ def set_text_message(message: str, details: str = "", kill_button: str = "", acc


 def split_image_directory():
-    set_text_message("No split image folder is selected.")
+    set_text_message("No Split Image Folder is selected.")


-def split_image_directory_not_found():
-    set_text_message("The Split Image Folder does not exist.")
+def invalid_directory(directory: str):
+    set_text_message(f"Folder {directory!r} is invalid or does not exist.")


 def split_image_directory_empty():
@@ -57,14 +65,14 @@ def split_image_directory_empty():
 def image_type(image: str):
     set_text_message(
         f"{image!r} is not a valid image file, does not exist, "
-        + "or the full image file path contains a special character.",
+        + "or the full image file path contains a special character."
     )


 def region():
     set_text_message(
         "No region is selected or the Capture Region window is not open. "
-        + "Select a region or load settings while the Capture Region window is open.",
+        + "Select a region or load settings while the Capture Region window is open."
     )

@@ -74,7 +82,8 @@ def split_hotkey():

 def pause_hotkey():
     set_text_message(
-        "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.",
+        "Your split image folder contains an image filename with a pause flag {p}, "
+        + "but no pause hotkey is set."
     )

@@ -87,7 +96,9 @@ def alignment_not_matched():

 def no_keyword_image(keyword: str):
-    set_text_message(f"Your split image folder does not contain an image with the keyword {keyword!r}.")
+    set_text_message(
+        f"Your split image folder does not contain an image with the keyword {keyword!r}."
+    )


 def multiple_keyword_images(keyword: str):
@@ -100,7 +111,9 @@ def reset_hotkey():

 def old_version_settings_file():
     set_text_message(
-        "Old version settings file detected. This version allows settings files in .toml format. Starting from v2.0.",
+        "Old version settings file detected. "
+        + "This version allows settings files in .toml format, starting from v2.0."
     )
Starting from v2.0.", + "Old version settings file detected. " + + "This version allows settings files in .toml format. " + + "Starting from v2.0." ) @@ -114,46 +127,89 @@ def invalid_hotkey(hotkey_name: str): def no_settings_file_on_open(): set_text_message( - "No settings file found. One can be loaded on open if placed in the same folder as the AutoSplit executable.", + "No settings file found. " + + "One can be loaded on open if placed in the same folder as the AutoSplit executable." ) def too_many_settings_files_on_open(): set_text_message( "Too many settings files found. " - + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable.", + + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable." ) def check_for_updates(): - set_text_message("An error occurred while attempting to check for updates. Please check your connection.") + set_text_message( + "An error occurred while attempting to check for updates. Please check your connection." + ) def load_start_image(): set_text_message( "Start Image found, but cannot be loaded unless Start hotkey is set. " - + "Please set the hotkey, and then click the Reload Start Image button.", + + "Please set the hotkey, and then click the Reload Start Image button." ) def stdin_lost(): - set_text_message("stdin not supported or lost, external control like LiveSplit integration will not work.") + set_text_message( + "stdin not supported or lost, external control like LiveSplit integration will not work." + ) def already_open(): set_text_message( - "An instance of AutoSplit is already running.


 def already_open():
     set_text_message(
-        "An instance of AutoSplit is already running.<br/>Are you sure you want to open a another one?",
+        "An instance of AutoSplit is already running."
+        + "<br/>Are you sure you want to open another one?",
         "",
         "Don't open",
         "Ignore",
     )


+def linux_groups():
+    set_text_message(
+        "Linux users must ensure they are in the 'tty' and 'input' groups "
+        + "and have write access to '/dev/uinput'. You can run the following commands to do so:",
+        # Keep in sync with README.md and scripts/install.ps1
+        """\
+sudo usermod -a -G tty,input $USER
+sudo touch /dev/uinput
+sudo chmod +0666 /dev/uinput
+echo 'KERNEL=="uinput", TAG+="uaccess"' | sudo tee /etc/udev/rules.d/50-uinput.rules
+echo 'SUBSYSTEM=="input", MODE="0666" GROUP="plugdev"' | sudo tee /etc/udev/rules.d/12-input.rules
+echo 'SUBSYSTEM=="misc", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules
+echo 'SUBSYSTEM=="tty", MODE="0666" GROUP="plugdev"' | sudo tee -a /etc/udev/rules.d/12-input.rules
+loginctl terminate-user $USER""",
+    )
+
+
+def linux_uinput():
+    set_text_message(
+        "Failed to create a device file using `uinput` module. "
+        + "This can happen when running Linux under WSL. "
+        + "Keyboard events have been disabled."
+    )
+
+
+# Keep in sync with README.md#DOWNLOAD_AND_OPEN
+WAYLAND_WARNING = """\
+All screen capture methods are incompatible with Wayland. Follow this guide to disable it:
+<a href="https://linuxconfig.org/how-to-enable-disable-wayland-on-ubuntu-22-04-desktop">\
+https://linuxconfig.org/how-to-enable-disable-wayland-on-ubuntu-22-04-desktop</a>"""
+
+
+def linux_wayland():
+    set_text_message(WAYLAND_WARNING)
+
+
 def exception_traceback(exception: BaseException, message: str = ""):
     if not message:
         message = (
             "AutoSplit encountered an unhandled exception and will try to recover, "
-            + f"however, there is no guarantee it will keep working properly. {CREATE_NEW_ISSUE_MESSAGE}"
+            + "however, there is no guarantee it will keep working properly. "
+            + CREATE_NEW_ISSUE_MESSAGE
         )
     set_text_message(
         message,
@@ -170,14 +226,17 @@ def exception_traceback(exception: BaseException, message: str = ""):


 def make_excepthook(autosplit: "AutoSplit"):
-    def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None):
+    def excepthook(
+        exception_type: type[BaseException],
+        exception: BaseException,
+        _traceback: TracebackType | None,
+    ):
         # Catch Keyboard Interrupts for a clean close
         if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt):
             sys.exit(0)
-        # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture
-        if (
-            exception_type is SystemError
-            and str(exception) == " returned a result with an error set"
+        # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture  # noqa: E501
+        if exception_type is SystemError and str(exception) == (
+            " returned a result with an error set"
         ):
             return
         # Whithin LiveSplit excepthook needs to use MainWindow's signals to show errors
" + + CREATE_NEW_ISSUE_MESSAGE + ) # Print error to console if not running in executable if FROZEN: exception_traceback(exception, message) else: traceback.print_exception(type(exception), exception, exception.__traceback__) sys.exit(1) + + +def tesseract_missing(ocr_split_file_path: str): + set_text_message( + f"{ocr_split_file_path!r} is an Optical Character Recognition split file " + + "but tesseract couldn't be found." + + f'\nPlease read ' + + f"github.com/{GITHUB_REPOSITORY}#install-tesseract for installation instructions." + ) + + +def wrong_ocr_values(ocr_split_file_path: str): + set_text_message( + f"{ocr_split_file_path!r} has invalid values." + + "\nPlease make sure that `left < right` and `top < bottom`. " + + "Also check for negative values in the 'methods' or 'fps_limit' settings" + ) diff --git a/src/hotkeys.py b/src/hotkeys.py index 34f4519e..5f17f32b 100644 --- a/src/hotkeys.py +++ b/src/hotkeys.py @@ -1,3 +1,4 @@ +import sys from collections.abc import Callable from typing import TYPE_CHECKING, Literal, cast @@ -6,7 +7,18 @@ from PySide6 import QtWidgets import error_messages -from utils import fire_and_forget, is_digit +from utils import fire_and_forget, is_digit, try_input_device_access + +if sys.platform == "linux": + import grp + import os + + groups = {grp.getgrgid(group).gr_name for group in os.getgroups()} + KEYBOARD_GROUPS_ISSUE = not {"input", "tty"}.issubset(groups) + KEYBOARD_UINPUT_ISSUE = not try_input_device_access() +else: + KEYBOARD_GROUPS_ISSUE = False + KEYBOARD_UINPUT_ISSUE = False if TYPE_CHECKING: from AutoSplit import AutoSplit @@ -17,13 +29,31 @@ SET_HOTKEY_TEXT = "Set Hotkey" PRESS_A_KEY_TEXT = "Press a key..." -Commands = Literal["split", "start", "pause", "reset", "skip", "undo"] -Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] -HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] +CommandStr = Literal["split", "start", "pause", "reset", "skip", "undo"] +Hotkey = Literal[ + "split", + "reset", + "skip_split", + "undo_split", + "pause", + "screenshot", + "toggle_auto_reset_image", +] +HOTKEYS = ( + "split", + "reset", + "skip_split", + "undo_split", + "pause", + "screenshot", + "toggle_auto_reset_image", +) +HOTKEYS_WHEN_AUTOCONTROLLED = {"screenshot", "toggle_auto_reset_image"} def remove_all_hotkeys(): - keyboard.unhook_all() + if not KEYBOARD_GROUPS_ISSUE and not KEYBOARD_UINPUT_ISSUE: + keyboard.unhook_all() def before_setting_hotkey(autosplit: "AutoSplit"): @@ -43,19 +73,25 @@ def after_setting_hotkey(autosplit: "AutoSplit"): autosplit.start_auto_splitter_button.setEnabled(True) if autosplit.SettingsWidget: for hotkey in HOTKEYS: - getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(SET_HOTKEY_TEXT) + getattr( + autosplit.SettingsWidget, + f"set_{hotkey}_hotkey_button", + ).setText(SET_HOTKEY_TEXT) getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True) -def send_command(autosplit: "AutoSplit", command: Commands): - # Note: Rather than having the start image able to also reset the timer, - # having the reset image check be active at all time would be a better, more organic solution, - # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images. 
+def send_command(autosplit: "AutoSplit", command: CommandStr):
+    if command in autosplit.settings_dict["screenshot_on"]:
+        autosplit.screenshot_signal.emit()
     match command:
         case _ if autosplit.is_auto_controlled:
             if command == "start" and autosplit.settings_dict["start_also_resets"]:
                 print("reset", flush=True)
             print(command, flush=True)
+        # Note: Rather than having the start image able to also reset the timer, having
+        # the reset image check be active at all times would be a better, more organic solution.
+        # But that is dependent on migrating to an observer pattern (#219) and
+        # being able to reload all images.
         case "start" if autosplit.settings_dict["start_also_resets"]:
             _send_hotkey(autosplit.settings_dict["reset_hotkey"])
         case "reset":
@@ -68,7 +104,7 @@ def send_command(autosplit: "AutoSplit", command: Commands):
             _send_hotkey(autosplit.settings_dict["skip_split_hotkey"])
         case "undo":
             _send_hotkey(autosplit.settings_dict["undo_split_hotkey"])
-        case _:  # pyright: ignore[reportUnnecessaryComparison]
+        case _:
             raise KeyError(f"{command!r} is not a valid command")

@@ -87,7 +123,7 @@ def _send_hotkey(hotkey_or_scan_code: int | str | None):

     # Deal with regular inputs
     # If an int or does not contain the following strings
-    if (
+    if (  # fmt: skip
         isinstance(hotkey_or_scan_code, int)
         or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+"))
     ):
@@ -95,16 +131,14 @@ def _send_hotkey(hotkey_or_scan_code: int | str | None):
         return

     # FIXME: Localized keys won't work here
-    # Deal with problematic keys. Even by sending specific scan code "keyboard" still sends the default (wrong) key
+    # Deal with problematic keys.
+    # Even by sending specific scan code "keyboard" still sends the default (wrong) key
     # keyboard also has issues with capitalization modifier (shift+A)
     # keyboard.send(keyboard.key_to_scan_codes(key_or_scan_code)[1])
-    pyautogui.hotkey(
-        *[
-            "+" if key == "plus" else key
-            for key
-            in hotkey_or_scan_code.replace(" ", "").split("+")
-        ],
-    )
+    pyautogui.hotkey(*[
+        "+" if key == "plus" else key  # fmt: skip
+        for key in hotkey_or_scan_code.replace(" ", "").split("+")
+    ])
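The list comprehension passed to `pyautogui.hotkey` above maps the stored name `"plus"` back to a literal `"+"` after splitting, since `"+"` itself is the separator between keys. An illustration:

```python
hotkey = "ctrl+shift+plus"
keys = ["+" if key == "plus" else key for key in hotkey.replace(" ", "").split("+")]
assert keys == ["ctrl", "shift", "+"]
```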
and "(keypad)./decimal" if keyboard_event.scan_code in {83, 52}: - # TODO: "del" won't work with "(keypad)delete" if localized in non-english (ie: "suppr" in french) + # TODO: "del" won't work with "(keypad)delete" if localized in non-english + # (ie: "suppr" in french) return expected_key == keyboard_event.name # Prevent "action keys" from triggering "keypad keys" if keyboard_event.name and is_digit(keyboard_event.name[-1]): @@ -128,7 +164,7 @@ def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) return bool( keyboard_event.is_keypad if expected_key.startswith("num ") - else not keyboard_event.is_keypad, + else not keyboard_event.is_keypad ) # Prevent "keypad action keys" from triggering "regular numbers" and "keypad numbers" @@ -136,13 +172,20 @@ def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) return not is_digit(expected_key[-1]) -def _hotkey_action(keyboard_event: keyboard.KeyboardEvent, key_name: str, action: Callable[[], None]): +def _hotkey_action( + keyboard_event: keyboard.KeyboardEvent, + key_name: str, + action: Callable[[], None], +): """ We're doing the check here instead of saving the key code because the non-keypad shared keys are localized while the keypad ones aren't. They also share scan codes on Windows. """ - if keyboard_event.event_type == keyboard.KEY_DOWN and __validate_keypad(key_name, keyboard_event): + if keyboard_event.event_type == keyboard.KEY_DOWN and __validate_keypad( + key_name, + keyboard_event, + ): action() @@ -219,9 +262,9 @@ def __get_hotkey_action(autosplit: "AutoSplit", hotkey: Hotkey): if hotkey == "split": return autosplit.start_auto_splitter if hotkey == "skip_split": - return lambda: autosplit.skip_split(True) + return lambda: autosplit.skip_split(navigate_image_only=True) if hotkey == "undo_split": - return lambda: autosplit.undo_split(True) + return lambda: autosplit.undo_split(navigate_image_only=True) if hotkey == "toggle_auto_reset_image": def toggle_auto_reset_image(): @@ -237,15 +280,24 @@ def toggle_auto_reset_image(): def is_valid_hotkey_name(hotkey_name: str): return any( key and not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) - for key - in hotkey_name.split("+") + for key in hotkey_name.split("+") ) + # TODO: using getattr/setattr is NOT a good way to go about this. It was only temporarily done to # reduce duplicated code. We should use a dictionary of hotkey class or something. def set_hotkey(autosplit: "AutoSplit", hotkey: Hotkey, preselected_hotkey_name: str = ""): + if KEYBOARD_GROUPS_ISSUE: + if not preselected_hotkey_name: + error_messages.linux_groups() + return + if KEYBOARD_UINPUT_ISSUE: + if not preselected_hotkey_name: + error_messages.linux_uinput() + return + if autosplit.SettingsWidget: # Unfocus all fields cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus() @@ -264,7 +316,9 @@ def read_and_set_hotkey(): # Unset hotkey by pressing "Escape". This is the same behaviour as LiveSplit if hotkey_name == "esc": _unhook(getattr(autosplit, f"{hotkey}_hotkey")) - autosplit.settings_dict[f"{hotkey}_hotkey"] = "" # pyright: ignore[reportGeneralTypeIssues] + autosplit.settings_dict[f"{hotkey}_hotkey"] = ( # pyright: ignore[reportGeneralTypeIssues] + "" + ) if autosplit.SettingsWidget: getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") return @@ -283,13 +337,16 @@ def read_and_set_hotkey(): setattr( autosplit, f"{hotkey}_hotkey", - # keyboard.add_hotkey doesn't give the last keyboard event, so we can't __validate_keypad. 
+ # keyboard.add_hotkey doesn't give the last keyboard event, + # so we can't __validate_keypad. # This means "ctrl + num 5" and "ctrl + 5" will both be registered. # For that reason, we still prefer keyboard.hook_key for single keys. - # keyboard module allows you to hit multiple keys for a hotkey. they are joined together by +. + # The keyboard module allows you to hit multiple keys for a hotkey. + # They are joined together by `+`. keyboard.add_hotkey(hotkey_name, action) if "+" in hotkey_name - # We need to inspect the event to know if it comes from numpad because of _canonial_names. + # We need to inspect the event to know if it comes from numpad + # because of _canonical_names. # See: https://github.com/boppreh/keyboard/issues/161#issuecomment-386825737 # The best way to achieve this is to make our own hotkey handling on top of hook # See: https://github.com/boppreh/keyboard/issues/216#issuecomment-431999553 @@ -301,7 +358,9 @@ def read_and_set_hotkey(): if autosplit.SettingsWidget: getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText(hotkey_name) - autosplit.settings_dict[f"{hotkey}_hotkey"] = hotkey_name # pyright: ignore[reportGeneralTypeIssues] + autosplit.settings_dict[f"{hotkey}_hotkey"] = ( # pyright: ignore[reportGeneralTypeIssues] + hotkey_name + ) except Exception as exception: # noqa: BLE001 # We really want to catch everything here error = exception autosplit.show_error_signal.emit(lambda: error_messages.exception_traceback(error)) diff --git a/src/menu_bar.py b/src/menu_bar.py index f4107a27..27d0d7fb 100644 --- a/src/menu_bar.py +++ b/src/menu_bar.py @@ -1,14 +1,16 @@ -import asyncio +import json +import sys import webbrowser -from typing import TYPE_CHECKING, Any, cast +from functools import partial +from typing import TYPE_CHECKING, Any, Literal, cast +from urllib.error import URLError +from urllib.request import urlopen -import requests from packaging.version import parse as version_parse from PySide6 import QtCore, QtWidgets from PySide6.QtCore import Qt from PySide6.QtGui import QBrush, QPalette from PySide6.QtWidgets import QFileDialog -from requests.exceptions import RequestException from typing_extensions import override import error_messages @@ -21,13 +23,29 @@ get_all_video_capture_devices, ) from gen import about, design, settings as settings_ui, update_checker -from hotkeys import HOTKEYS, Hotkey, set_hotkey +from hotkeys import HOTKEYS, HOTKEYS_WHEN_AUTOCONTROLLED, CommandStr, set_hotkey from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, ONE_SECOND, decimal, fire_and_forget if TYPE_CHECKING: from AutoSplit import AutoSplit HALF_BRIGHTNESS = 128 +LINUX_SCREENSHOT_SUPPORT = ( + "\n\n----------------------------------------------------\n\n" + + error_messages.WAYLAND_WARNING + # Keep in sync with README.md#Capture_Method_Linux + + '\n"scrot" must be installed to use SCReenshOT. ' + + "\nRun: sudo apt-get install scrot" +) if sys.platform == "linux" else "" # fmt: skip + +_DEBUG_SCREENSHOT_COMMANDS: tuple[CommandStr, ...]
= ( + "split", + "start", + "reset", + "undo", + "skip", + "pause", +) class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget): # noqa: N801 # Private class @@ -48,7 +66,13 @@ def open_about(autosplit: "AutoSplit"): class __UpdateCheckerWidget(QtWidgets.QWidget, update_checker.Ui_UpdateChecker): # noqa: N801 # Private class - def __init__(self, latest_version: str, design_window: design.Ui_MainWindow, check_on_open: bool = False): + def __init__( + self, + latest_version: str, + design_window: design.Ui_MainWindow, + *, + check_on_open: bool = False, + ): super().__init__() self.setupUi(self) self.current_version_number_label.setText(AUTOSPLIT_VERSION) @@ -79,17 +103,24 @@ def do_not_ask_me_again_state_changed(self): ) -def open_update_checker(autosplit: "AutoSplit", latest_version: str, check_on_open: bool): - if not autosplit.UpdateCheckerWidget or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden(): - autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open) +def open_update_checker(autosplit: "AutoSplit", latest_version: str, *, check_on_open: bool): + if ( + not autosplit.UpdateCheckerWidget + or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden() + ): + autosplit.UpdateCheckerWidget = __UpdateCheckerWidget( + latest_version, + autosplit, + check_on_open=check_on_open, + ) def view_help(): - webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#tutorial") + webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}/blob/main/docs/tutorial.md") class __CheckForUpdatesThread(QtCore.QThread): # noqa: N801 # Private class - def __init__(self, autosplit: "AutoSplit", check_on_open: bool): + def __init__(self, autosplit: "AutoSplit", *, check_on_open: bool): super().__init__() self._autosplit_ref = autosplit self.check_on_open = check_on_open @@ -97,10 +128,17 @@ def __init__(self, autosplit: "AutoSplit", check_on_open: bool): @override def run(self): try: - response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30) - latest_version = str(response.json()["name"]).split("v")[1] - self._autosplit_ref.update_checker_widget_signal.emit(latest_version, self.check_on_open) - except (RequestException, KeyError): + with urlopen( + f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", + timeout=30, + ) as response: + json_response: dict[str, str] = json.loads(response.read()) + latest_version = json_response["name"].split("v")[1] + self._autosplit_ref.update_checker_widget_signal.emit( + latest_version, + self.check_on_open, + ) + except (URLError, KeyError): if not self.check_on_open: self._autosplit_ref.show_error_signal.emit(error_messages.check_for_updates) @@ -113,8 +151,11 @@ def about_qt_for_python(): webbrowser.open("https://wiki.qt.io/Qt_for_Python") -def check_for_updates(autosplit: "AutoSplit", check_on_open: bool = False): - autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open) +def check_for_updates(autosplit: "AutoSplit", *, check_on_open: bool = False): + autosplit.CheckForUpdatesThread = __CheckForUpdatesThread( + autosplit, + check_on_open=check_on_open, + ) autosplit.CheckForUpdatesThread.start() @@ -124,7 +165,8 @@ def __init__(self, autosplit: "AutoSplit"): self.__video_capture_devices: list[CameraInfo] = [] """ Used to temporarily store the existing cameras, - we don't want to call `get_all_video_capture_devices` agains and possibly have a different result + we don't want to call 
`get_all_video_capture_devices` again + and possibly have a different result """ self.setupUi(self) @@ -145,11 +187,14 @@ def __init__(self, autosplit: "AutoSplit"): # Don't autofocus any particular field self.setFocus() -# region Build the Capture method combobox + # region Build the Capture method combobox # fmt: skip + capture_method_values = CAPTURE_METHODS.values() self.__set_all_capture_devices() - # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream + # TODO: Word-wrapping works, but there's lots of extra padding to the right. + # Raise issue upstream + # # list_view = QtWidgets.QListView() # list_view.setWordWrap(True) # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) @@ -157,16 +202,15 @@ def __init__(self, autosplit: "AutoSplit"): # self.capture_method_combobox.setView(list_view) self.capture_method_combobox.addItems([ - f"- {method.name} ({method.short_description})" - for method in capture_method_values + f"- {method.name} ({method.short_description})" for method in capture_method_values ]) self.capture_method_combobox.setToolTip( - "\n\n".join([ - f"{method.name} :\n{method.description}" - for method in capture_method_values - ]), + "\n\n".join( + f"{method.name} :\n{method.description}" for method in capture_method_values + ) + + LINUX_SCREENSHOT_SUPPORT ) -# endregion + # endregion self.__setup_bindings() @@ -177,12 +221,12 @@ def __update_default_threshold(self, value: Any): self._autosplit_ref.table_current_image_threshold_label.setText( decimal(self._autosplit_ref.split_image.get_similarity_threshold(self._autosplit_ref)) if self._autosplit_ref.split_image - else "-", + else "-" ) self._autosplit_ref.table_reset_image_threshold_label.setText( decimal(self._autosplit_ref.reset_image.get_similarity_threshold(self._autosplit_ref)) if self._autosplit_ref.reset_image - else "-", + else "-" ) def __set_value(self, key: str, value: Any): @@ -191,7 +235,10 @@ def __set_value(self, key: str, value: Any): def get_capture_device_index(self, capture_device_id: int): """Returns 0 if the capture_device_id is invalid.""" try: - return [device.device_id for device in self.__video_capture_devices].index(capture_device_id) + return [ + device.device_id # fmt: skip + for device in self.__video_capture_devices + ].index(capture_device_id) except ValueError: return 0 @@ -205,14 +252,18 @@ def __enable_capture_device_if_its_selected_method( self.capture_device_combobox.setEnabled(is_video_capture_device) if is_video_capture_device: self.capture_device_combobox.setCurrentIndex( - self.get_capture_device_index(self._autosplit_ref.settings_dict["capture_device_id"]), + self.get_capture_device_index( + self._autosplit_ref.settings_dict["capture_device_id"] + ) ) else: self.capture_device_combobox.setPlaceholderText('Select "Video Capture Device" above') self.capture_device_combobox.setCurrentIndex(-1) def __capture_method_changed(self): - selected_capture_method = CAPTURE_METHODS.get_method_by_index(self.capture_method_combobox.currentIndex()) + selected_capture_method = CAPTURE_METHODS.get_method_by_index( + self.capture_method_combobox.currentIndex() + ) self.__enable_capture_device_if_its_selected_method(selected_capture_method) change_capture_method(selected_capture_method, self._autosplit_ref) return selected_capture_method @@ -224,7 +275,10 @@ def __capture_device_changed(self): capture_device = self.__video_capture_devices[device_index] self._autosplit_ref.settings_dict["capture_device_name"] = 
capture_device.name self._autosplit_ref.settings_dict["capture_device_id"] = capture_device.device_id - if self._autosplit_ref.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: + if ( + self._autosplit_ref.settings_dict["capture_method"] + == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE + ): # Re-initializes the VideoCaptureDeviceCaptureMethod change_capture_method(CaptureMethodEnum.VIDEO_CAPTURE_DEVICE, self._autosplit_ref) @@ -235,7 +289,7 @@ def __fps_limit_changed(self, value: int): @fire_and_forget def __set_all_capture_devices(self): - self.__video_capture_devices = asyncio.run(get_all_video_capture_devices()) + self.__video_capture_devices = get_all_video_capture_devices() if len(self.__video_capture_devices) > 0: for i in range(self.capture_device_combobox.count()): self.capture_device_combobox.removeItem(i) @@ -251,80 +305,125 @@ def __set_all_capture_devices(self): def __set_readme_link(self): self.custom_image_settings_info_label.setText( - self.custom_image_settings_info_label - .text() - .format(GITHUB_REPOSITORY=GITHUB_REPOSITORY), + self.custom_image_settings_info_label.text().format(GITHUB_REPOSITORY=GITHUB_REPOSITORY) ) # HACK: This is a workaround because custom_image_settings_info_label # simply will not open links with a left click no matter what we tried. self.readme_link_button.clicked.connect( - lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"), + lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme") ) self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);") + if sys.platform == "linux": + geometry = self.readme_link_button.geometry() + # In-button font has different width so "README" doesn't fit -.- + self.readme_link_button.setText("#DOC#") + self.readme_link_button.setGeometry( + QtCore.QRect(116, 220, geometry.width(), geometry.height()) + ) def __select_screenshot_directory(self): - self._autosplit_ref.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory( - self, - "Select Screenshots Directory", + self._autosplit_ref.settings_dict["screenshot_directory"] = ( + QFileDialog.getExistingDirectory( + self, + "Select Screenshots Directory", + self._autosplit_ref.settings_dict["screenshot_directory"] + or self._autosplit_ref.settings_dict["split_image_directory"], + ) + ) + self.screenshot_directory_input.setText( self._autosplit_ref.settings_dict["screenshot_directory"] - or self._autosplit_ref.settings_dict["split_image_directory"], ) - self.screenshot_directory_input.setText(self._autosplit_ref.settings_dict["screenshot_directory"]) def __setup_bindings(self): # Hotkey initial values and bindings - def hotkey_connect(hotkey: Hotkey): - return lambda: set_hotkey(self._autosplit_ref, hotkey) - for hotkey in HOTKEYS: hotkey_input: QtWidgets.QLineEdit = getattr(self, f"{hotkey}_input") - set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr(self, f"set_{hotkey}_hotkey_button") + set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr( + self, + f"set_{hotkey}_hotkey_button", + ) hotkey_input.setText(self._autosplit_ref.settings_dict.get(f"{hotkey}_hotkey", "")) - set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey)) # Make it very clear that hotkeys are not used when auto-controlled - if self._autosplit_ref.is_auto_controlled and hotkey != "toggle_auto_reset_image": + if self._autosplit_ref.is_auto_controlled and hotkey not in HOTKEYS_WHEN_AUTOCONTROLLED: set_hotkey_hotkey_button.setEnabled(False) hotkey_input.setEnabled(False) + else: 
+ set_hotkey_hotkey_button.clicked.connect( + partial(set_hotkey, self._autosplit_ref, hotkey=hotkey) + ) -# region Set initial values + # Debug screenshot selection checkboxes initial values and bindings + screenshot_on_setting = self._autosplit_ref.settings_dict["screenshot_on"] + for command in _DEBUG_SCREENSHOT_COMMANDS: + checkbox: QtWidgets.QCheckBox = getattr(self, f"screenshot_on_{command}_checkbox") + + checkbox.setChecked(command in screenshot_on_setting) + + def add_or_del(checked: Literal[0, 2], command: CommandStr = command): + if checked: + screenshot_on_setting.append(command) + else: + screenshot_on_setting.remove(command) + + checkbox.stateChanged.connect(add_or_del) + + # region Set initial values # Capture Settings self.fps_limit_spinbox.setValue(self._autosplit_ref.settings_dict["fps_limit"]) - self.live_capture_region_checkbox.setChecked(self._autosplit_ref.settings_dict["live_capture_region"]) + self.live_capture_region_checkbox.setChecked( + self._autosplit_ref.settings_dict["live_capture_region"] + ) self.capture_method_combobox.setCurrentIndex( - CAPTURE_METHODS.get_index(self._autosplit_ref.settings_dict["capture_method"]), + CAPTURE_METHODS.get_index(self._autosplit_ref.settings_dict["capture_method"]) ) # No self.capture_device_combobox.setCurrentIndex # It'll set itself asynchronously in self.__set_all_capture_devices() - self.screenshot_directory_input.setText(self._autosplit_ref.settings_dict["screenshot_directory"]) - self.open_screenshot_checkbox.setChecked(self._autosplit_ref.settings_dict["open_screenshot"]) + self.screenshot_directory_input.setText( + self._autosplit_ref.settings_dict["screenshot_directory"] + ) + self.open_screenshot_checkbox.setChecked( + self._autosplit_ref.settings_dict["open_screenshot"] + ) # Image Settings self.default_comparison_method_combobox.setCurrentIndex( - self._autosplit_ref.settings_dict["default_comparison_method"], + self._autosplit_ref.settings_dict["default_comparison_method"] ) self.default_similarity_threshold_spinbox.setValue( - self._autosplit_ref.settings_dict["default_similarity_threshold"], + self._autosplit_ref.settings_dict["default_similarity_threshold"] + ) + self.default_delay_time_spinbox.setValue( + self._autosplit_ref.settings_dict["default_delay_time"] + ) + self.default_pause_time_spinbox.setValue( + self._autosplit_ref.settings_dict["default_pause_time"] ) - self.default_delay_time_spinbox.setValue(self._autosplit_ref.settings_dict["default_delay_time"]) - self.default_pause_time_spinbox.setValue(self._autosplit_ref.settings_dict["default_pause_time"]) self.loop_splits_checkbox.setChecked(self._autosplit_ref.settings_dict["loop_splits"]) - self.start_also_resets_checkbox.setChecked(self._autosplit_ref.settings_dict["start_also_resets"]) - self.enable_auto_reset_image_checkbox.setChecked(self._autosplit_ref.settings_dict["enable_auto_reset"]) -# endregion -# region Binding + self.start_also_resets_checkbox.setChecked( + self._autosplit_ref.settings_dict["start_also_resets"] + ) + self.enable_auto_reset_image_checkbox.setChecked( + self._autosplit_ref.settings_dict["enable_auto_reset"] + ) + # endregion + + # region Binding # Capture Settings self.fps_limit_spinbox.valueChanged.connect(self.__fps_limit_changed) self.live_capture_region_checkbox.stateChanged.connect( - lambda: self.__set_value("live_capture_region", self.live_capture_region_checkbox.isChecked()), + lambda: self.__set_value( + "live_capture_region", + self.live_capture_region_checkbox.isChecked(), + ) ) 
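The `add_or_del` closure above pins `command` as a default argument; without it, every checkbox callback created in the loop would see only the loop's final value when Qt eventually invokes it. A minimal sketch of the same pattern outside Qt (hypothetical names, and `checked` simplified to a bool instead of Qt's 0/2 state int):

```python
callbacks = []
for command in ("split", "start", "reset"):
    # Binding the loop variable as a default argument freezes its value
    # per iteration; a plain closure would capture the variable itself,
    # and all three callbacks would report "reset".
    def on_toggle(checked: bool, command: str = command) -> str:
        return f"{command}: {'screenshot' if checked else 'no screenshot'}"

    callbacks.append(on_toggle)

assert callbacks[0](True) == "split: screenshot"
assert callbacks[2](False) == "reset: no screenshot"
```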
self.capture_method_combobox.currentIndexChanged.connect( - lambda: self.__set_value("capture_method", self.__capture_method_changed()), + lambda: self.__set_value("capture_method", self.__capture_method_changed()) ) self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed) self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory) self.open_screenshot_checkbox.stateChanged.connect( - lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()), + lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()) ) # Image Settings @@ -332,27 +431,35 @@ def hotkey_connect(hotkey: Hotkey): lambda: self.__set_value( "default_comparison_method", self.default_comparison_method_combobox.currentIndex(), - ), + ) ) self.default_similarity_threshold_spinbox.valueChanged.connect( - lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()), + lambda: self.__update_default_threshold( + self.default_similarity_threshold_spinbox.value() + ) ) self.default_delay_time_spinbox.valueChanged.connect( - lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()), + lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()) ) self.default_pause_time_spinbox.valueChanged.connect( - lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()), + lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()) ) self.loop_splits_checkbox.stateChanged.connect( - lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()), + lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()) ) self.start_also_resets_checkbox.stateChanged.connect( - lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()), + lambda: self.__set_value( + "start_also_resets", + self.start_also_resets_checkbox.isChecked(), + ) ) self.enable_auto_reset_image_checkbox.stateChanged.connect( - lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()), + lambda: self.__set_value( + "enable_auto_reset", + self.enable_auto_reset_image_checkbox.isChecked(), + ) ) -# endregion + # endregion def open_settings(autosplit: "AutoSplit"): @@ -371,16 +478,22 @@ def get_default_settings_from_ui(autosplit: "AutoSplit"): "skip_split_hotkey": default_settings_dialog.skip_split_input.text(), "pause_hotkey": default_settings_dialog.pause_input.text(), "screenshot_hotkey": default_settings_dialog.screenshot_input.text(), - "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(), + "toggle_auto_reset_image_hotkey": ( + default_settings_dialog.toggle_auto_reset_image_input.text() + ), "fps_limit": default_settings_dialog.fps_limit_spinbox.value(), "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(), "capture_method": CAPTURE_METHODS.get_method_by_index( - default_settings_dialog.capture_method_combobox.currentIndex(), + default_settings_dialog.capture_method_combobox.currentIndex() ), "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(), "capture_device_name": "", - "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(), - "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(), + "default_comparison_method": ( + 
default_settings_dialog.default_comparison_method_combobox.currentIndex() + ), + "default_similarity_threshold": ( + default_settings_dialog.default_similarity_threshold_spinbox.value() + ), "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(), "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(), "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(), @@ -389,6 +502,10 @@ "split_image_directory": autosplit.split_image_folder_input.text(), "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(), "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(), + "screenshot_on": [ + command + for command in _DEBUG_SCREENSHOT_COMMANDS + if getattr(default_settings_dialog, f"screenshot_on_{command}_checkbox").isChecked() + ], "captured_window_title": "", "capture_region": { "x": autosplit.x_spinbox.value(), diff --git a/src/region_selection.py b/src/region_selection.py index 34a61b22..b21e3fc5 100644 --- a/src/region_selection.py +++ b/src/region_selection.py @@ -1,20 +1,14 @@ -import os +import sys from math import ceil -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING import cv2 import numpy as np -import win32api -import win32gui -from cv2.typing import MatLike +from cv2.typing import MatLike, Point from PySide6 import QtCore, QtGui, QtWidgets from PySide6.QtTest import QTest from pywinctl import getTopWindowAt from typing_extensions import override -from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN -from winsdk._winrt import initialize_with_window -from winsdk.windows.foundation import AsyncStatus, IAsyncOperation -from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker import error_messages from capture_method import Region @@ -24,16 +18,31 @@ ImageShape, auto_split_directory, get_window_bounds, + imread, is_valid_hwnd, is_valid_image, ) +if sys.platform == "win32": + import win32api + import win32gui + from win32con import ( + SM_CXVIRTUALSCREEN, + SM_CYVIRTUALSCREEN, + SM_XVIRTUALSCREEN, + SM_YVIRTUALSCREEN, + ) + +if sys.platform == "linux": + from Xlib.display import Display + if TYPE_CHECKING: from AutoSplit import AutoSplit +GNOME_DESKTOP_ICONS_EXTENSION = "@!0,0;BDHF" ALIGN_REGION_THRESHOLD = 0.9 BORDER_WIDTH = 2 -SUPPORTED_IMREAD_FORMATS = [ +SUPPORTED_IMREAD_FORMATS = ( ("Windows bitmaps", "*.bmp *.dib"), ("JPEG files", "*.jpeg *.jpg *.jpe"), ("JPEG 2000 files", "*.jp2"), @@ -46,41 +55,59 @@ ("TIFF files", "*.tiff *.tif"), ("OpenEXR Image files", "*.exr"), ("Radiance HDR", "*.hdr *.pic"), -] +) """https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread""" IMREAD_EXT_FILTER = ( "All Files (" - + " ".join([f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS]) + + " ".join(f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS) + ");;" - + ";;".join([f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS]) + + ";;".join( + f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS + ) ) +def get_top_window_at(x: int, y: int): + """Give QWidget time to disappear to avoid Xlib.error.BadDrawable on Linux.""" + if sys.platform == "linux": + # Tested in increments of 10ms on my Pop!_OS 22.04 VM + QTest.qWait(80) + return getTopWindowAt(x, y) + + # TODO: For later as a different picker option -def __select_graphics_item(autosplit: 
"AutoSplit"): # pyright: ignore [reportUnusedFunction] - """Uses the built-in GraphicsCapturePicker to select the Window.""" - - def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus): - try: - if async_status != AsyncStatus.COMPLETED: - return - except SystemError as exception: - # HACK: can happen when closing the GraphicsCapturePicker - if str(exception).endswith("returned a result with an error set"): - return - raise - item = async_operation.get_results() - if not item: - return - autosplit.settings_dict["captured_window_title"] = item.display_name - autosplit.capture_method.reinitialize() - - picker = GraphicsCapturePicker() - initialize_with_window(picker, int(autosplit.effectiveWinId())) - async_operation = picker.pick_single_item_async() - # None if the selection is canceled - if async_operation: - async_operation.completed = callback +# def __select_graphics_item(autosplit: "AutoSplit"): +# """Uses the built-in GraphicsCapturePicker to select the Window.""" +# if sys.platform != "win32": +# raise OSError +# from winrt._winrt import initialize_with_window +# from winrt.windows.foundation import AsyncStatus, IAsyncOperation +# from winrt.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker +# +# def callback( +# async_operation: IAsyncOperation[GraphicsCaptureItem], +# async_status: AsyncStatus, +# ): +# try: +# if async_status != AsyncStatus.COMPLETED: +# return +# except SystemError as exception: +# # HACK: can happen when closing the GraphicsCapturePicker +# if str(exception).endswith("returned a result with an error set"): +# return +# raise +# item = async_operation.get_results() +# if not item: +# return +# autosplit.settings_dict["captured_window_title"] = item.display_name +# autosplit.capture_method.reinitialize() +# +# picker = GraphicsCapturePicker() +# initialize_with_window(picker, autosplit.effectiveWinId()) +# async_operation = picker.pick_single_item_async() +# # None if the selection is canceled +# if async_operation: +# async_operation.completed = callback def select_region(autosplit: "AutoSplit"): @@ -96,7 +123,7 @@ def select_region(autosplit: "AutoSplit"): if selection is None: return # No selection done - window = getTopWindowAt(selection["x"], selection["y"]) + window = get_top_window_at(selection["x"], selection["y"]) if not window: error_messages.region() return @@ -110,10 +137,16 @@ def select_region(autosplit: "AutoSplit"): autosplit.settings_dict["captured_window_title"] = window_text autosplit.capture_method.reinitialize() - left_bounds, top_bounds, *_ = get_window_bounds(hwnd) - window_x, window_y, *_ = win32gui.GetWindowRect(hwnd) - offset_x = window_x + left_bounds - offset_y = window_y + top_bounds + if sys.platform == "win32": + left_bounds, top_bounds, *_ = get_window_bounds(hwnd) + window_x, window_y, *_ = win32gui.GetWindowRect(hwnd) + offset_x = window_x + left_bounds + offset_y = window_y + top_bounds + else: + data = window._xWin.translate_coords(autosplit.hwnd, 0, 0)._data # pyright:ignore[reportPrivateUsage] # noqa: SLF001 + offset_x = data["x"] + offset_y = data["y"] + __set_region_values( autosplit, x=selection["x"] - offset_x, @@ -136,7 +169,7 @@ def select_window(autosplit: "AutoSplit"): if selection is None: return # No selection done - window = getTopWindowAt(selection["x"], selection["y"]) + window = get_top_window_at(selection["x"], selection["y"]) if not window: error_messages.region() return @@ -150,11 +183,18 @@ def select_window(autosplit: "AutoSplit"): 
autosplit.settings_dict["captured_window_title"] = window_text autosplit.capture_method.reinitialize() - # Exlude the borders and titlebar from the window selection. To only get the client area. - _, __, window_width, window_height = get_window_bounds(hwnd) - _, __, client_width, client_height = win32gui.GetClientRect(hwnd) - border_width = ceil((window_width - client_width) / 2) - titlebar_with_border_height = window_height - client_height - border_width + if sys.platform == "win32": + # Exlude the borders and titlebar from the window selection. To only get the client area. + _, __, window_width, window_height = get_window_bounds(hwnd) + _, __, client_width, client_height = win32gui.GetClientRect(hwnd) + border_width = ceil((window_width - client_width) / 2) + titlebar_with_border_height = window_height - client_height - border_width + else: + data = window._xWin.get_geometry()._data # pyright:ignore[reportPrivateUsage] # noqa: SLF001 + client_height = data["height"] + client_width = data["width"] + border_width = data["border_width"] + titlebar_with_border_height = border_width __set_region_values( autosplit, @@ -182,7 +222,7 @@ def align_region(autosplit: "AutoSplit"): if not template_filename: return - template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED) + template = imread(template_filename, cv2.IMREAD_UNCHANGED) # Add alpha channel to template if it's missing. if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA) @@ -208,7 +248,8 @@ def align_region(autosplit: "AutoSplit"): error_messages.alignment_not_matched() return - # The new region can be defined by using the min_loc point and the best_height and best_width of the template. + # The new region can be defined by using the min_loc point + # and the best_height and best_width of the template. __set_region_values( autosplit, x=autosplit.settings_dict["capture_region"]["x"] + best_loc[0], @@ -242,7 +283,7 @@ def __test_alignment(capture: MatLike, template: MatLike): best_match = 0.0 best_height = 0 best_width = 0 - best_loc = (0, 0) + best_loc: Point = (0, 0) # This tests 50 images scaled from 20% to 300% of the original template size for scale in np.linspace(0.2, 3, num=56): @@ -272,32 +313,25 @@ def __test_alignment(capture: MatLike, template: MatLike): return best_match, best_height, best_width, best_loc -def validate_before_parsing(autosplit: "AutoSplit", show_error: bool = True, check_empty_directory: bool = True): - error = None - if not autosplit.settings_dict["split_image_directory"]: - error = error_messages.split_image_directory - elif not os.path.isdir(autosplit.settings_dict["split_image_directory"]): - error = error_messages.split_image_directory_not_found - elif check_empty_directory and not os.listdir(autosplit.settings_dict["split_image_directory"]): - error = error_messages.split_image_directory_empty - elif not autosplit.capture_method.check_selected_region_exists(): - error = error_messages.region - if error and show_error: - error() - return not error - - class BaseSelectWidget(QtWidgets.QWidget): selection: Region | None = None def __init__(self): super().__init__() - # We need to pull the monitor information to correctly draw the geometry covering all portions - # of the user's screen. 
These parameters create the bounding box with left, top, width, and height - x = cast(int, win32api.GetSystemMetrics(SM_XVIRTUALSCREEN)) - y = cast(int, win32api.GetSystemMetrics(SM_YVIRTUALSCREEN)) - width = cast(int, win32api.GetSystemMetrics(SM_CXVIRTUALSCREEN)) - height = cast(int, win32api.GetSystemMetrics(SM_CYVIRTUALSCREEN)) + # We need to pull the monitor information to correctly draw + # the geometry covering all portions of the user's screen. + # These parameters create the bounding box with left, top, width, and height + if sys.platform == "win32": + x = win32api.GetSystemMetrics(SM_XVIRTUALSCREEN) + y = win32api.GetSystemMetrics(SM_YVIRTUALSCREEN) + width = win32api.GetSystemMetrics(SM_CXVIRTUALSCREEN) + height = win32api.GetSystemMetrics(SM_CYVIRTUALSCREEN) + else: + data = Display().screen().root.get_geometry()._data # noqa: SLF001 + x = data["x"] + y = data["y"] + width = data["width"] + height = data["height"] self.setGeometry(x, y, width, height) self.setFixedSize(width, height) # Prevent move/resizing on Linux self.setWindowTitle(type(self).__name__) @@ -358,7 +392,8 @@ def mouseMoveEvent(self, event: QtGui.QMouseEvent): def mouseReleaseEvent(self, event: QtGui.QMouseEvent): if self.__begin != self.__end: # The coordinates are pulled relative to the top left of the set geometry, - # so the added virtual screen offsets convert them back to the virtual screen coordinates + # so the added virtual screen offsets convert them back to the virtual + # screen coordinates left = min(self.__begin.x(), self.__end.x()) + self.geometry().x() top = min(self.__begin.y(), self.__end.y()) + self.geometry().y() right = max(self.__begin.x(), self.__end.x()) + self.geometry().x() diff --git a/src/split_parser.py b/src/split_parser.py index acdf6dd7..164a66a2 100644 --- a/src/split_parser.py +++ b/src/split_parser.py @@ -1,20 +1,29 @@ import os +import sys from collections.abc import Callable +from functools import partial +from stat import UF_HIDDEN from typing import TYPE_CHECKING, TypeVar import error_messages from AutoSplitImage import RESET_KEYWORD, START_KEYWORD, AutoSplitImage, ImageType from utils import is_valid_image +if sys.platform == "win32": + from stat import FILE_ATTRIBUTE_HIDDEN, FILE_ATTRIBUTE_SYSTEM + + if TYPE_CHECKING: + from _typeshed import StrPath + from AutoSplit import AutoSplit -[ +( DUMMY_FLAG, BELOW_FLAG, PAUSE_FLAG, *_, -] = [1 << i for i in range(31)] # 32 bits of flags +) = tuple(1 << i for i in range(31)) # 32 bits of flags T = TypeVar("T", str, int, float) @@ -40,8 +49,9 @@ def __value_from_filename( def threshold_from_filename(filename: str): """ - Retrieve the threshold from the filename, if there is no threshold or the threshold - doesn't meet the requirements of being [0, 1], then None is returned. + Retrieve the threshold from the filename. + If there is no threshold or the threshold doesn't meet the requirements of being [0, 1], + then None is returned. @param filename: String containing the file's name @return: A valid threshold, if not then None @@ -56,8 +66,9 @@ def threshold_from_filename(filename: str): def pause_from_filename(filename: str): """ - Retrieve the pause time from the filename, if there is no pause time or the pause time - isn't a valid positive number or 0, then None is returned. + Retrieve the pause time from the filename, + if there is no pause time or the pause time isn't a valid positive number or 0, + then None is returned. 
@param filename: String containing the file's name @return: A valid pause time, if not then None @@ -72,8 +83,9 @@ def delay_time_from_filename(filename: str): """ - Retrieve the delay time from the filename, if there is no delay time or the delay time - isn't a valid positive number or 0 number, then None is returned. + Retrieve the delay time from the filename. + If there is no delay time or the delay time isn't a valid positive number or 0, + then None is returned. @param filename: String containing the file's name @return: A valid delay time, if not then None @@ -88,8 +100,8 @@ def loop_from_filename(filename: str): """ - Retrieve the number of loops from filename, if there is no loop number or the loop number isn't valid, - then 1 is returned. + Retrieve the number of loops from the filename. + If there is no loop number or the loop number isn't valid, then 1 is returned. @param filename: String containing the file's name @return: A valid loop number, if not then 1 @@ -104,8 +116,8 @@ def comparison_method_from_filename(filename: str): """ - Retrieve the comparison method index from filename, if there is no comparison method or the index isn't valid, - then None is returned. + Retrieve the comparison method index from the filename. + If there is no comparison method or the index isn't valid, then None is returned. @param filename: String containing the file's name @return: A valid comparison method index, if not then None
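Each of these helpers follows the same contract: pull an optionally-present token out of the split image's filename and fall back to a safe default when it is missing or invalid. A standalone sketch of that pattern (the helper is hypothetical, and the parenthesis delimiters for the threshold token are an assumption for illustration; the real delimiters live in `__value_from_filename`, which is not shown in this hunk):

```python
def value_from_filename(filename: str, delimiters: str, default: float) -> float:
    """Extract a float wrapped in one-character delimiters, or a default."""
    try:
        value = filename.split(delimiters[0], 1)[1].split(delimiters[1], 1)[0]
        return float(value)
    except (IndexError, ValueError):
        # Token absent or not a number: fall back, as the functions above do
        return default


def threshold_from_filename(filename: str) -> float | None:
    value = value_from_filename(filename, "()", -1.0)
    # None when the value doesn't meet the documented [0, 1] requirement
    return value if 0 <= value <= 1 else None


assert threshold_from_filename("dungeon_(0.95).png") == 0.95
assert threshold_from_filename("dungeon.png") is None
```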
@@ -120,7 +132,8 @@ def flags_from_filename(filename: str): """ - Retrieve the flags from the filename, if there are no flags then 0 is returned. + Retrieve the flags from the filename. + If there are no flags, then 0 is returned. @param filename: String containing the file's name @return: The flags as an integer, if invalid flags are found it returns 0 @@ -172,14 +185,55 @@ def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType): return None -def parse_and_validate_images(autosplit: "AutoSplit"): - # Get split images - all_images = [ - AutoSplitImage(os.path.join(autosplit.settings_dict["split_image_directory"], image_name)) - for image_name - in os.listdir(autosplit.settings_dict["split_image_directory"]) - ] +def validate_before_parsing(autosplit: "AutoSplit", *, show_error: bool = True): + error = None + split_image_directory = autosplit.settings_dict["split_image_directory"] + if not split_image_directory: + error = error_messages.split_image_directory + elif not os.path.isdir(split_image_directory): + error = partial(error_messages.invalid_directory, split_image_directory) + elif not os.listdir(split_image_directory): + error = error_messages.split_image_directory_empty + elif not autosplit.capture_method.check_selected_region_exists(): + error = error_messages.region + if error and show_error: + error() + return not error + + +def is_user_file(path: "StrPath"): + """Returns False for hidden files, system files and folders.""" + if os.path.isdir(path) or os.path.basename(path).startswith("."): + return False + stat_result = os.stat(path) + if sys.platform == "win32": + return not ( + (stat_result.st_file_attributes & FILE_ATTRIBUTE_SYSTEM) + | (stat_result.st_file_attributes & FILE_ATTRIBUTE_HIDDEN) + ) + # UF_HIDDEN is present on regular Windows files + return not stat_result.st_mode & UF_HIDDEN + + +def __get_images_from_directory(directory: "StrPath"): + """ + Returns a list of AutoSplitImage parsed from a directory. + Hidden files, system files and folders are silently ignored. + """ + file_paths = ( + os.path.join(directory, filename) # fmt: skip + for filename in os.listdir(directory) + ) + filtered_image_paths = ( + image_path # fmt: skip + for image_path in file_paths + if is_user_file(image_path) + ) + return [AutoSplitImage(image_path) for image_path in filtered_image_paths] + + +def parse_and_validate_images(autosplit: "AutoSplit"): + all_images = __get_images_from_directory(autosplit.settings_dict["split_image_directory"]) # Find non-split images and then remove them from the list start_image = __pop_image_type(all_images, ImageType.START) reset_image = __pop_image_type(all_images, ImageType.RESET) @@ -187,7 +241,8 @@ error_message: Callable[[], object] | None = None - # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error. + # If there is no start hotkey set but a Start Image is present, + # and is not auto controlled, throw an error. if ( start_image and not autosplit.settings_dict["split_hotkey"] @@ -195,7 +250,8 @@ ): error_message = error_messages.load_start_image - # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error. + # If there is no reset hotkey set but a Reset Image is present, + # and is not auto controlled, throw an error. 
elif ( reset_image and not autosplit.settings_dict["reset_hotkey"] @@ -208,12 +264,8 @@ def parse_and_validate_images(autosplit: "AutoSplit"): else: for image in split_images: # Test for image without transparency - if not is_valid_image(image.byte_array): - - def image_validity(filename: str): - return lambda: error_messages.image_validity(filename) - - error_message = image_validity(image.filename) + if not image.is_ocr and not is_valid_image(image.byte_array): + error_message = partial(error_messages.image_validity, image.filename) break # error out if there is a {p} flag but no pause hotkey set and is not auto controlled. diff --git a/src/user_profile.py b/src/user_profile.py index f874e04e..cb27ec40 100644 --- a/src/user_profile.py +++ b/src/user_profile.py @@ -1,15 +1,17 @@ import os +import tomllib from copy import deepcopy from typing import TYPE_CHECKING, TypedDict, cast -import toml +import tomli_w from PySide6 import QtCore, QtWidgets from typing_extensions import deprecated, override import error_messages from capture_method import CAPTURE_METHODS, CaptureMethodEnum, Region, change_capture_method from gen import design -from hotkeys import HOTKEYS, remove_all_hotkeys, set_hotkey +from hotkeys import HOTKEYS, CommandStr, Hotkey, remove_all_hotkeys, set_hotkey +from menu_bar import open_settings from utils import auto_split_directory if TYPE_CHECKING: @@ -39,12 +41,14 @@ class UserProfileDict(TypedDict): split_image_directory: str screenshot_directory: str open_screenshot: bool + screenshot_on: list[CommandStr] captured_window_title: str capture_region: Region @override # pyright: ignore @deprecated("Use `copy.deepcopy` instead") - def copy(): return super().copy() + def copy(): + return super().copy() DEFAULT_PROFILE = UserProfileDict( @@ -70,6 +74,7 @@ def copy(): return super().copy() split_image_directory="", screenshot_directory="", open_screenshot=True, + screenshot_on=[], captured_window_title="", capture_region=Region(x=0, y=0, width=1, height=1), ) @@ -108,8 +113,8 @@ def save_settings_as(autosplit: "AutoSplit"): def __save_settings_to_file(autosplit: "AutoSplit", save_settings_file_path: str): # Save settings to a .toml file - with open(save_settings_file_path, "w", encoding="utf-8") as file: - toml.dump(autosplit.settings_dict, file) + with open(save_settings_file_path, "wb") as file: + tomli_w.dump(autosplit.settings_dict, file) autosplit.last_saved_settings = deepcopy(autosplit.settings_dict) autosplit.last_successfully_loaded_settings_file_path = save_settings_file_path return save_settings_file_path @@ -119,13 +124,23 @@ def __load_settings_from_file(autosplit: "AutoSplit", load_settings_file_path: s if load_settings_file_path.endswith(".pkl"): autosplit.show_error_signal.emit(error_messages.old_version_settings_file) return False + + # Allow seamlessly reloading the entire settings widget + settings_widget_was_open = False + settings_widget = cast(QtWidgets.QWidget | None, autosplit.SettingsWidget) + if settings_widget: + settings_widget_was_open = settings_widget.isVisible() + settings_widget.close() + try: - with open(load_settings_file_path, encoding="utf-8") as file: + with open(load_settings_file_path, mode="rb") as file: # Casting here just so we can build an actual UserProfileDict once we're done validating - # Fallback to default settings if some are missing from the file. This happens when new settings are added. - loaded_settings = DEFAULT_PROFILE | cast(UserProfileDict, toml.load(file)) + # Fallback to default settings if some are missing from the file. 
+ # This happens when new settings are added. + loaded_settings = DEFAULT_PROFILE | cast(UserProfileDict, tomllib.load(file)) # TODO: Data Validation / fallbacks ? + loaded_settings["screenshot_on"] = list(set(loaded_settings["screenshot_on"])) autosplit.settings_dict = UserProfileDict(**loaded_settings) autosplit.last_saved_settings = deepcopy(autosplit.settings_dict) @@ -134,27 +149,34 @@ def __load_settings_from_file(autosplit: "AutoSplit", load_settings_file_path: s autosplit.width_spinbox.setValue(autosplit.settings_dict["capture_region"]["width"]) autosplit.height_spinbox.setValue(autosplit.settings_dict["capture_region"]["height"]) autosplit.split_image_folder_input.setText(autosplit.settings_dict["split_image_directory"]) - except (FileNotFoundError, MemoryError, TypeError, toml.TomlDecodeError): + except (FileNotFoundError, MemoryError, TypeError, tomllib.TOMLDecodeError): autosplit.show_error_signal.emit(error_messages.invalid_settings) return False remove_all_hotkeys() if not autosplit.is_auto_controlled: - for hotkey, hotkey_name in [(hotkey, f"{hotkey}_hotkey") for hotkey in HOTKEYS]: + for hotkey, hotkey_name in ((hotkey, f"{hotkey}_hotkey") for hotkey in HOTKEYS): hotkey_value = autosplit.settings_dict.get(hotkey_name) if hotkey_value: - set_hotkey(autosplit, hotkey, hotkey_value) + # cast caused by a regression in pyright 1.1.365 + set_hotkey(autosplit, cast(Hotkey, hotkey), hotkey_value) - change_capture_method(cast(CaptureMethodEnum, autosplit.settings_dict["capture_method"]), autosplit) + change_capture_method( + cast(CaptureMethodEnum, autosplit.settings_dict["capture_method"]), + autosplit, + ) if autosplit.settings_dict["capture_method"] != CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: autosplit.capture_method.recover_window(autosplit.settings_dict["captured_window_title"]) if not autosplit.capture_method.check_selected_region_exists(): autosplit.live_image.setText( "Reload settings after opening" + f"\n{autosplit.settings_dict['captured_window_title']!r}" - + "\nto automatically load Capture Region", + + "\nto automatically load Capture Region" ) + if settings_widget_was_open: + open_settings(autosplit) + return True @@ -168,19 +190,22 @@ def load_settings(autosplit: "AutoSplit", from_path: str = ""): "TOML (*.toml)", )[0] ) - if not (load_settings_file_path and __load_settings_from_file(autosplit, load_settings_file_path)): + if not ( + load_settings_file_path # fmt: skip + and __load_settings_from_file(autosplit, load_settings_file_path) + ): return autosplit.last_successfully_loaded_settings_file_path = load_settings_file_path # TODO: Should this check be in `__load_start_image` ? 
if not autosplit.is_running: - autosplit.load_start_image_signal.emit(False, True) + autosplit.reload_start_image_signal.emit(False, True) def load_settings_on_open(autosplit: "AutoSplit"): settings_files = [ - file for file - in os.listdir(auto_split_directory) + file # fmt: skip + for file in os.listdir(auto_split_directory) if file.endswith(".toml") ] @@ -218,7 +243,7 @@ def load_check_for_updates_on_open(autosplit: "AutoSplit"): autosplit.action_check_for_updates_on_open.setChecked(value) -def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool): +def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool): # noqa: FBT001 """Sets the "Check For Updates On Open" QSettings value and the checkbox state.""" design_window.action_check_for_updates_on_open.setChecked(value) QtCore.QSettings( diff --git a/src/utils.py b/src/utils.py index d62f855b..3fd57e47 100644 --- a/src/utils.py +++ b/src/utils.py @@ -1,33 +1,67 @@ import asyncio -import ctypes -import ctypes.wintypes import os +import shutil +import subprocess # noqa: S404 import sys -from collections.abc import Callable, Iterable +from collections.abc import Callable, Iterable, Sequence from enum import IntEnum +from functools import partial from itertools import chain from platform import version from threading import Thread -from typing import TYPE_CHECKING, Any, TypeGuard, TypeVar +from typing import TYPE_CHECKING, Any, TypeAlias, TypedDict, TypeGuard, TypeVar -import win32gui -import win32ui +import cv2 +import numpy as np from cv2.typing import MatLike -from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind -from winsdk.windows.media.capture import MediaCapture from gen.build_vars import AUTOSPLIT_BUILD_NUMBER, AUTOSPLIT_GITHUB_REPOSITORY +if sys.platform == "win32": + import ctypes + import ctypes.wintypes + from _ctypes import COMError # noqa: PLC2701 # comtypes is untyped + + import win32gui + import win32ui + from pygrabber.dshow_graph import FilterGraph + + STARTUPINFO: TypeAlias = subprocess.STARTUPINFO +else: + STARTUPINFO: TypeAlias = None + +if sys.platform == "linux": + import fcntl + + from pyscreeze import RUNNING_WAYLAND as RUNNING_WAYLAND # noqa: PLC0414 + +else: + RUNNING_WAYLAND = False + + if TYPE_CHECKING: # Source does not exist, keep this under TYPE_CHECKING from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource] T = TypeVar("T") -ONE_SECOND = 1000 -"""1000 milliseconds in 1 second""" + +def find_tesseract_path(): + search_path = os.environ.get("PATH", os.defpath) + if sys.platform == "win32": + search_path += r";C:\Program Files\Tesseract-OCR;C:\Program Files (x86)\Tesseract-OCR" + return shutil.which(TESSERACT_EXE, path=search_path) + + +TESSERACT_EXE = "tesseract" +TESSERACT_PATH = find_tesseract_path() +"""The path to execute tesseract. 
`None` if it can't be found.""" +TESSERACT_CMD = (TESSERACT_PATH or TESSERACT_EXE, "-", "-", "--oem", "1", "--psm", "6") + DWMWA_EXTENDED_FRAME_BOUNDS = 9 MAXBYTE = 255 +ONE_SECOND = 1000 +"""1000 milliseconds in 1 second""" BGR_CHANNEL_COUNT = 3 """How many channels in a BGR image""" BGRA_CHANNEL_COUNT = 4 @@ -47,6 +81,14 @@ class ColorChannel(IntEnum): Alpha = 3 +class SubprocessKWArgs(TypedDict): + stdin: int + stdout: int + stderr: int + startupinfo: "STARTUPINFO | None" + env: os._Environ[str] | None # pyright: ignore[reportPrivateUsage] + + def decimal(value: float): # Using ljust instead of :2f because of python float rounding errors return f"{int(value * 100) / 100}".ljust(4, "0") @@ -67,7 +109,10 @@ def is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]: def is_valid_hwnd(hwnd: int): - """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`.""" + """ + Validate the hwnd points to a valid window + and not the desktop or whatever window obtained with `""`. + """ if not hwnd: return False if sys.platform == "win32": @@ -81,6 +126,8 @@ def first(iterable: Iterable[T]) -> T: def try_delete_dc(dc: "PyCDC"): + if sys.platform != "win32": + raise OSError try: dc.DeleteDC() except win32ui.error: @@ -88,6 +135,9 @@ def try_delete_dc(dc: "PyCDC"): def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]: + if sys.platform != "win32": + raise OSError + extended_frame_bounds = ctypes.wintypes.RECT() ctypes.windll.dwmapi.DwmGetWindowAttribute( hwnd, @@ -104,8 +154,37 @@ def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]: return window_left_bounds, window_top_bounds, window_width, window_height +# Note: maybe reorganize capture_method module to have +# different helper modules and a methods submodule +def get_input_device_resolution(index: int) -> tuple[int, int] | None: + if sys.platform != "win32": + return (0, 0) + filter_graph = FilterGraph() + try: + filter_graph.add_video_input_device(index) + # This can happen with virtual cameras throwing errors. + # For example since OBS 29.1 updated FFMPEG breaking VirtualCam 3.0 + # https://github.com/Toufool/AutoSplit/issues/238 + except COMError: + return None + + try: + resolution = filter_graph.get_input_device().get_current_format() + # For unknown reasons, some devices can raise "ValueError: NULL pointer access". + # For instance, Oh_DeeR's AVerMedia HD Capture C985 Bus 12 + except ValueError: + return None + finally: + filter_graph.remove_filters() + return resolution + + def open_file(file_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]): - os.startfile(file_path) # noqa: S606 + if sys.platform == "win32": + os.startfile(file_path) # noqa: S606 + else: + opener = "xdg-open" if sys.platform == "linux" else "open" + subprocess.call([opener, file_path]) # noqa: S603 def get_or_create_eventloop(): @@ -117,41 +196,26 @@ def get_or_create_eventloop(): return asyncio.get_event_loop() -def get_direct3d_device(): - # Note: Must create in the same thread (can't use a global) otherwise when ran from LiveSplit it will raise: - # OSError: The application called an interface that was marshalled for a different thread - media_capture = MediaCapture() - - async def init_mediacapture(): - await media_capture.initialize_async() - - asyncio.run(init_mediacapture()) - direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device - if not direct_3d_device: - try: - # May be problematic? 
https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318 - direct_3d_device = LearningModelDevice(LearningModelDeviceKind.DIRECT_X_HIGH_PERFORMANCE).direct3_d11_device - # TODO: Unknown potential error, I don't have an older Win10 machine to test. - except BaseException: # noqa: S110,BLE001 - pass - if not direct_3d_device: - raise OSError("Unable to initialize a Direct3D Device.") - return direct_3d_device - - -def try_get_direct3d_device(): +def try_input_device_access(): + """Same as `make_uinput` in `keyboard/_nixcommon.py`.""" + if sys.platform != "linux": + return False try: - return get_direct3d_device() + UI_SET_EVBIT = 0x40045564 # noqa: N806 + with open("/dev/uinput", "wb") as uinput: + fcntl.ioctl(uinput, UI_SET_EVBIT) except OSError: - return None + return False + return True def fire_and_forget(func: Callable[..., Any]): """ Runs a synchronous function asynchronously without waiting for a response. - Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~ - Because maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36 + Uses threads on Windows because + ~~`RuntimeError: There is no current event loop in thread 'MainThread'`~~ + maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36 Uses asyncio on Linux because of a `Segmentation fault (core dumped)` """ @@ -161,7 +225,7 @@ def wrapped(*args: Any, **kwargs: Any): thread = Thread(target=func, args=args, kwargs=kwargs) thread.start() return thread - return get_or_create_eventloop().run_in_executor(None, func, *args, *kwargs) + return get_or_create_eventloop().run_in_executor(None, partial(func, *args, **kwargs)) return wrapped @@ -170,6 +234,73 @@ def flatten(nested_iterable: Iterable[Iterable[T]]) -> chain[T]: return chain.from_iterable(nested_iterable) +def imread(filename: str, flags: int = cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, dtype=np.uint8), flags) + + +def imwrite(filename: str, img: MatLike, params: Sequence[int] = ()): + success, encoded_img = cv2.imencode(os.path.splitext(filename)[1], img, params) + if not success: + raise OSError(f"cv2 could not write to path {filename}") + encoded_img.tofile(filename) + + +def subprocess_kwargs(): + """ + Create a set of arguments which make a ``subprocess.Popen`` (and + variants) call work with or without Pyinstaller, ``--noconsole`` or + not, on Windows and Linux. + + Typical use: + ```python + subprocess.call(["program_to_run", "arg_1"], **subprocess_kwargs()) + ``` + --- + Originally found in https://github.com/madmaze/pytesseract/blob/master/pytesseract/pytesseract.py + Recipe from https://github.com/pyinstaller/pyinstaller/wiki/Recipe-subprocess + which itself is taken from https://github.com/bjones1/enki/blob/master/enki/lib/get_console_output.py + """ + # The following is true only on Windows. + if sys.platform == "win32": + # On Windows, subprocess calls will pop up a command window by default when run from + # Pyinstaller with the ``--noconsole`` option. Avoid this distraction. + startupinfo = STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + # https://github.com/madmaze/pytesseract/blob/88839f03590578a10e806a5244704437c9d477da/pytesseract/pytesseract.py#L236 + startupinfo.wShowWindow = subprocess.SW_HIDE + # Windows doesn't search the path by default. Pass it an environment so it will. 
+ env = os.environ + else: + startupinfo = None + env = None + # On Windows, running this from the binary produced by Pyinstaller + # with the ``--noconsole`` option requires redirecting everything + # (stdin, stdout, stderr) to avoid an OSError exception + # "[Error 6] the handle is invalid." + return SubprocessKWArgs( + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + startupinfo=startupinfo, + env=env, + ) + + +def run_tesseract(png: bytes): + """ + Executes the tesseract CLI and pipes a PNG encoded image to it. + @param png: PNG encoded image as byte array + @return: The recognized output string from tesseract. + """ + return ( + subprocess.Popen( # noqa: S603 # Only using known literal strings + TESSERACT_CMD, **subprocess_kwargs() + ) + .communicate(input=png)[0] + .decode() + ) + + # Environment specifics WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1 FIRST_WIN_11_BUILD = 22000 @@ -183,5 +314,5 @@ def flatten(nested_iterable: Iterable[Iterable[T]]) -> chain[T]: # Shared strings # Check `excludeBuildNumber` during workflow dispatch build generate a clean version number -AUTOSPLIT_VERSION = "2.2.2" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") +AUTOSPLIT_VERSION = "2.3" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY diff --git a/typings/cv2/__init__.pyi b/typings/cv2/__init__.pyi deleted file mode 100644 index 28ef8445..00000000 --- a/typings/cv2/__init__.pyi +++ /dev/null @@ -1,10430 +0,0 @@ -import typing - -import cv2.aruco -import cv2.cuda -import cv2.gapi -import cv2.gapi.streaming -import cv2.typing -import numpy -from cv2 import ( - Error as Error, - aruco as aruco, - barcode as barcode, - cuda as cuda, - detail as detail, - dnn as dnn, - fisheye as fisheye, - flann as flann, - gapi as gapi, - ipp as ipp, - ml as ml, - ocl as ocl, - ogl as ogl, - parallel as parallel, - samples as samples, - segmentation as segmentation, - utils as utils, - videoio_registry as videoio_registry, -) -from cv2.mat_wrapper import Mat as Mat - -# Enumerations -SORT_EVERY_ROW: int -SORT_EVERY_COLUMN: int -SORT_ASCENDING: int -SORT_DESCENDING: int -SortFlags = int -"""One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING]""" - -COVAR_SCRAMBLED: int -COVAR_NORMAL: int -COVAR_USE_AVG: int -COVAR_SCALE: int -COVAR_ROWS: int -COVAR_COLS: int -CovarFlags = int -"""One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS]""" - -KMEANS_RANDOM_CENTERS: int -KMEANS_PP_CENTERS: int -KMEANS_USE_INITIAL_LABELS: int -KmeansFlags = int -"""One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS]""" - -REDUCE_SUM: int -REDUCE_AVG: int -REDUCE_MAX: int -REDUCE_MIN: int -REDUCE_SUM2: int -ReduceTypes = int -"""One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2]""" - -ROTATE_90_CLOCKWISE: int -ROTATE_180: int -ROTATE_90_COUNTERCLOCKWISE: int -RotateFlags = int -"""One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE]""" - -Param_INT: int -PARAM_INT: int -Param_BOOLEAN: int -PARAM_BOOLEAN: int -Param_REAL: int -PARAM_REAL: int -Param_STRING: int -PARAM_STRING: int -Param_MAT: int -PARAM_MAT: int -Param_MAT_VECTOR: int -PARAM_MAT_VECTOR: int -Param_ALGORITHM: int -PARAM_ALGORITHM: int -Param_FLOAT: int -PARAM_FLOAT: int -Param_UNSIGNED_INT: int -PARAM_UNSIGNED_INT: int -Param_UINT64: int -PARAM_UINT64: int -Param_UCHAR: int -PARAM_UCHAR: int -Param_SCALAR: 
int -PARAM_SCALAR: int -Param = int -"""One of [Param_INT, PARAM_INT, Param_BOOLEAN, PARAM_BOOLEAN, Param_REAL, PARAM_REAL, Param_STRING, PARAM_STRING, -Param_MAT, PARAM_MAT, Param_MAT_VECTOR, PARAM_MAT_VECTOR, Param_ALGORITHM, PARAM_ALGORITHM, Param_FLOAT, PARAM_FLOAT, -Param_UNSIGNED_INT, PARAM_UNSIGNED_INT, Param_UINT64, PARAM_UINT64, Param_UCHAR, PARAM_UCHAR, Param_SCALAR, -PARAM_SCALAR]""" - -DECOMP_LU: int -DECOMP_SVD: int -DECOMP_EIG: int -DECOMP_CHOLESKY: int -DECOMP_QR: int -DECOMP_NORMAL: int -DecompTypes = int -"""One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL]""" - -NORM_INF: int -NORM_L1: int -NORM_L2: int -NORM_L2SQR: int -NORM_HAMMING: int -NORM_HAMMING2: int -NORM_TYPE_MASK: int -NORM_RELATIVE: int -NORM_MINMAX: int -NormTypes = int -"""One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, -NORM_MINMAX]""" - -CMP_EQ: int -CMP_GT: int -CMP_GE: int -CMP_LT: int -CMP_LE: int -CMP_NE: int -CmpTypes = int -"""One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE]""" - -GEMM_1_T: int -GEMM_2_T: int -GEMM_3_T: int -GemmFlags = int -"""One of [GEMM_1_T, GEMM_2_T, GEMM_3_T]""" - -DFT_INVERSE: int -DFT_SCALE: int -DFT_ROWS: int -DFT_COMPLEX_OUTPUT: int -DFT_REAL_OUTPUT: int -DFT_COMPLEX_INPUT: int -DCT_INVERSE: int -DCT_ROWS: int -DftFlags = int -"""One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, -DCT_ROWS]""" - -BORDER_CONSTANT: int -BORDER_REPLICATE: int -BORDER_REFLECT: int -BORDER_WRAP: int -BORDER_REFLECT_101: int -BORDER_TRANSPARENT: int -BORDER_REFLECT101: int -BORDER_DEFAULT: int -BORDER_ISOLATED: int -BorderTypes = int -"""One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101, BORDER_TRANSPARENT, -BORDER_REFLECT101, BORDER_DEFAULT, BORDER_ISOLATED]""" - -ACCESS_READ: int -ACCESS_WRITE: int -ACCESS_RW: int -ACCESS_MASK: int -ACCESS_FAST: int -AccessFlag = int -"""One of [ACCESS_READ, ACCESS_WRITE, ACCESS_RW, ACCESS_MASK, ACCESS_FAST]""" - -USAGE_DEFAULT: int -USAGE_ALLOCATE_HOST_MEMORY: int -USAGE_ALLOCATE_DEVICE_MEMORY: int -USAGE_ALLOCATE_SHARED_MEMORY: int -__UMAT_USAGE_FLAGS_32BIT: int -UMatUsageFlags = int -"""One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, -__UMAT_USAGE_FLAGS_32BIT]""" - -SOLVELP_LOST: int -SOLVELP_UNBOUNDED: int -SOLVELP_UNFEASIBLE: int -SOLVELP_SINGLE: int -SOLVELP_MULTI: int -SolveLPResult = int -"""One of [SOLVELP_LOST, SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI]""" - -QUAT_ASSUME_NOT_UNIT: int -QUAT_ASSUME_UNIT: int -QuatAssumeType = int -"""One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT]""" - -FILTER_SCHARR: int -SpecialFilter = int -"""One of [FILTER_SCHARR]""" - -MORPH_ERODE: int -MORPH_DILATE: int -MORPH_OPEN: int -MORPH_CLOSE: int -MORPH_GRADIENT: int -MORPH_TOPHAT: int -MORPH_BLACKHAT: int -MORPH_HITMISS: int -MorphTypes = int -"""One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, -MORPH_HITMISS]""" - -MORPH_RECT: int -MORPH_CROSS: int -MORPH_ELLIPSE: int -MorphShapes = int -"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE]""" - -INTER_NEAREST: int -INTER_LINEAR: int -INTER_CUBIC: int -INTER_AREA: int -INTER_LANCZOS4: int -INTER_LINEAR_EXACT: int -INTER_NEAREST_EXACT: int -INTER_MAX: int -WARP_FILL_OUTLIERS: int -WARP_INVERSE_MAP: int -InterpolationFlags = int -"""One of [INTER_NEAREST, INTER_LINEAR, 
INTER_CUBIC, INTER_AREA, INTER_LANCZOS4, INTER_LINEAR_EXACT, -INTER_NEAREST_EXACT, INTER_MAX, WARP_FILL_OUTLIERS, WARP_INVERSE_MAP]""" - -WARP_POLAR_LINEAR: int -WARP_POLAR_LOG: int -WarpPolarMode = int -"""One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG]""" - -INTER_BITS: int -INTER_BITS2: int -INTER_TAB_SIZE: int -INTER_TAB_SIZE2: int -InterpolationMasks = int -"""One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2]""" - -DIST_USER: int -DIST_L1: int -DIST_L2: int -DIST_C: int -DIST_L12: int -DIST_FAIR: int -DIST_WELSCH: int -DIST_HUBER: int -DistanceTypes = int -"""One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, DIST_FAIR, DIST_WELSCH, DIST_HUBER]""" - -DIST_MASK_3: int -DIST_MASK_5: int -DIST_MASK_PRECISE: int -DistanceTransformMasks = int -"""One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE]""" - -THRESH_BINARY: int -THRESH_BINARY_INV: int -THRESH_TRUNC: int -THRESH_TOZERO: int -THRESH_TOZERO_INV: int -THRESH_MASK: int -THRESH_OTSU: int -THRESH_TRIANGLE: int -ThresholdTypes = int -"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, -THRESH_TRIANGLE]""" - -ADAPTIVE_THRESH_MEAN_C: int -ADAPTIVE_THRESH_GAUSSIAN_C: int -AdaptiveThresholdTypes = int -"""One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C]""" - -GC_BGD: int -GC_FGD: int -GC_PR_BGD: int -GC_PR_FGD: int -GrabCutClasses = int -"""One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD]""" - -GC_INIT_WITH_RECT: int -GC_INIT_WITH_MASK: int -GC_EVAL: int -GC_EVAL_FREEZE_MODEL: int -GrabCutModes = int -"""One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL]""" - -DIST_LABEL_CCOMP: int -DIST_LABEL_PIXEL: int -DistanceTransformLabelTypes = int -"""One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL]""" - -FLOODFILL_FIXED_RANGE: int -FLOODFILL_MASK_ONLY: int -FloodFillFlags = int -"""One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY]""" - -CC_STAT_LEFT: int -CC_STAT_TOP: int -CC_STAT_WIDTH: int -CC_STAT_HEIGHT: int -CC_STAT_AREA: int -CC_STAT_MAX: int -ConnectedComponentsTypes = int -"""One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX]""" - -CCL_DEFAULT: int -CCL_WU: int -CCL_GRANA: int -CCL_BOLELLI: int -CCL_SAUF: int -CCL_BBDT: int -CCL_SPAGHETTI: int -ConnectedComponentsAlgorithmsTypes = int -"""One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI]""" - -RETR_EXTERNAL: int -RETR_LIST: int -RETR_CCOMP: int -RETR_TREE: int -RETR_FLOODFILL: int -RetrievalModes = int -"""One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL]""" - -CHAIN_APPROX_NONE: int -CHAIN_APPROX_SIMPLE: int -CHAIN_APPROX_TC89_L1: int -CHAIN_APPROX_TC89_KCOS: int -ContourApproximationModes = int -"""One of [CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS]""" - -CONTOURS_MATCH_I1: int -CONTOURS_MATCH_I2: int -CONTOURS_MATCH_I3: int -ShapeMatchModes = int -"""One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3]""" - -HOUGH_STANDARD: int -HOUGH_PROBABILISTIC: int -HOUGH_MULTI_SCALE: int -HOUGH_GRADIENT: int -HOUGH_GRADIENT_ALT: int -HoughModes = int -"""One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT]""" - -LSD_REFINE_NONE: int -LSD_REFINE_STD: int -LSD_REFINE_ADV: int -LineSegmentDetectorModes = int -"""One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV]""" - -HISTCMP_CORREL: int -HISTCMP_CHISQR: int -HISTCMP_INTERSECT: int -HISTCMP_BHATTACHARYYA: int -HISTCMP_HELLINGER: int 
-HISTCMP_CHISQR_ALT: int -HISTCMP_KL_DIV: int -HistCompMethods = int -"""One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, -HISTCMP_CHISQR_ALT, HISTCMP_KL_DIV]""" - -COLOR_BGR2BGRA: int -COLOR_RGB2RGBA: int -COLOR_BGRA2BGR: int -COLOR_RGBA2RGB: int -COLOR_BGR2RGBA: int -COLOR_RGB2BGRA: int -COLOR_RGBA2BGR: int -COLOR_BGRA2RGB: int -COLOR_BGR2RGB: int -COLOR_RGB2BGR: int -COLOR_BGRA2RGBA: int -COLOR_RGBA2BGRA: int -COLOR_BGR2GRAY: int -COLOR_RGB2GRAY: int -COLOR_GRAY2BGR: int -COLOR_GRAY2RGB: int -COLOR_GRAY2BGRA: int -COLOR_GRAY2RGBA: int -COLOR_BGRA2GRAY: int -COLOR_RGBA2GRAY: int -COLOR_BGR2BGR565: int -COLOR_RGB2BGR565: int -COLOR_BGR5652BGR: int -COLOR_BGR5652RGB: int -COLOR_BGRA2BGR565: int -COLOR_RGBA2BGR565: int -COLOR_BGR5652BGRA: int -COLOR_BGR5652RGBA: int -COLOR_GRAY2BGR565: int -COLOR_BGR5652GRAY: int -COLOR_BGR2BGR555: int -COLOR_RGB2BGR555: int -COLOR_BGR5552BGR: int -COLOR_BGR5552RGB: int -COLOR_BGRA2BGR555: int -COLOR_RGBA2BGR555: int -COLOR_BGR5552BGRA: int -COLOR_BGR5552RGBA: int -COLOR_GRAY2BGR555: int -COLOR_BGR5552GRAY: int -COLOR_BGR2XYZ: int -COLOR_RGB2XYZ: int -COLOR_XYZ2BGR: int -COLOR_XYZ2RGB: int -COLOR_BGR2YCrCb: int -COLOR_BGR2YCR_CB: int -COLOR_RGB2YCrCb: int -COLOR_RGB2YCR_CB: int -COLOR_YCrCb2BGR: int -COLOR_YCR_CB2BGR: int -COLOR_YCrCb2RGB: int -COLOR_YCR_CB2RGB: int -COLOR_BGR2HSV: int -COLOR_RGB2HSV: int -COLOR_BGR2Lab: int -COLOR_BGR2LAB: int -COLOR_RGB2Lab: int -COLOR_RGB2LAB: int -COLOR_BGR2Luv: int -COLOR_BGR2LUV: int -COLOR_RGB2Luv: int -COLOR_RGB2LUV: int -COLOR_BGR2HLS: int -COLOR_RGB2HLS: int -COLOR_HSV2BGR: int -COLOR_HSV2RGB: int -COLOR_Lab2BGR: int -COLOR_LAB2BGR: int -COLOR_Lab2RGB: int -COLOR_LAB2RGB: int -COLOR_Luv2BGR: int -COLOR_LUV2BGR: int -COLOR_Luv2RGB: int -COLOR_LUV2RGB: int -COLOR_HLS2BGR: int -COLOR_HLS2RGB: int -COLOR_BGR2HSV_FULL: int -COLOR_RGB2HSV_FULL: int -COLOR_BGR2HLS_FULL: int -COLOR_RGB2HLS_FULL: int -COLOR_HSV2BGR_FULL: int -COLOR_HSV2RGB_FULL: int -COLOR_HLS2BGR_FULL: int -COLOR_HLS2RGB_FULL: int -COLOR_LBGR2Lab: int -COLOR_LBGR2LAB: int -COLOR_LRGB2Lab: int -COLOR_LRGB2LAB: int -COLOR_LBGR2Luv: int -COLOR_LBGR2LUV: int -COLOR_LRGB2Luv: int -COLOR_LRGB2LUV: int -COLOR_Lab2LBGR: int -COLOR_LAB2LBGR: int -COLOR_Lab2LRGB: int -COLOR_LAB2LRGB: int -COLOR_Luv2LBGR: int -COLOR_LUV2LBGR: int -COLOR_Luv2LRGB: int -COLOR_LUV2LRGB: int -COLOR_BGR2YUV: int -COLOR_RGB2YUV: int -COLOR_YUV2BGR: int -COLOR_YUV2RGB: int -COLOR_YUV2RGB_NV12: int -COLOR_YUV2BGR_NV12: int -COLOR_YUV2RGB_NV21: int -COLOR_YUV2BGR_NV21: int -COLOR_YUV420sp2RGB: int -COLOR_YUV420SP2RGB: int -COLOR_YUV420sp2BGR: int -COLOR_YUV420SP2BGR: int -COLOR_YUV2RGBA_NV12: int -COLOR_YUV2BGRA_NV12: int -COLOR_YUV2RGBA_NV21: int -COLOR_YUV2BGRA_NV21: int -COLOR_YUV420sp2RGBA: int -COLOR_YUV420SP2RGBA: int -COLOR_YUV420sp2BGRA: int -COLOR_YUV420SP2BGRA: int -COLOR_YUV2RGB_YV12: int -COLOR_YUV2BGR_YV12: int -COLOR_YUV2RGB_IYUV: int -COLOR_YUV2BGR_IYUV: int -COLOR_YUV2RGB_I420: int -COLOR_YUV2BGR_I420: int -COLOR_YUV420p2RGB: int -COLOR_YUV420P2RGB: int -COLOR_YUV420p2BGR: int -COLOR_YUV420P2BGR: int -COLOR_YUV2RGBA_YV12: int -COLOR_YUV2BGRA_YV12: int -COLOR_YUV2RGBA_IYUV: int -COLOR_YUV2BGRA_IYUV: int -COLOR_YUV2RGBA_I420: int -COLOR_YUV2BGRA_I420: int -COLOR_YUV420p2RGBA: int -COLOR_YUV420P2RGBA: int -COLOR_YUV420p2BGRA: int -COLOR_YUV420P2BGRA: int -COLOR_YUV2GRAY_420: int -COLOR_YUV2GRAY_NV21: int -COLOR_YUV2GRAY_NV12: int -COLOR_YUV2GRAY_YV12: int -COLOR_YUV2GRAY_IYUV: int -COLOR_YUV2GRAY_I420: int -COLOR_YUV420sp2GRAY: 
int -COLOR_YUV420SP2GRAY: int -COLOR_YUV420p2GRAY: int -COLOR_YUV420P2GRAY: int -COLOR_YUV2RGB_UYVY: int -COLOR_YUV2BGR_UYVY: int -COLOR_YUV2RGB_Y422: int -COLOR_YUV2BGR_Y422: int -COLOR_YUV2RGB_UYNV: int -COLOR_YUV2BGR_UYNV: int -COLOR_YUV2RGBA_UYVY: int -COLOR_YUV2BGRA_UYVY: int -COLOR_YUV2RGBA_Y422: int -COLOR_YUV2BGRA_Y422: int -COLOR_YUV2RGBA_UYNV: int -COLOR_YUV2BGRA_UYNV: int -COLOR_YUV2RGB_YUY2: int -COLOR_YUV2BGR_YUY2: int -COLOR_YUV2RGB_YVYU: int -COLOR_YUV2BGR_YVYU: int -COLOR_YUV2RGB_YUYV: int -COLOR_YUV2BGR_YUYV: int -COLOR_YUV2RGB_YUNV: int -COLOR_YUV2BGR_YUNV: int -COLOR_YUV2RGBA_YUY2: int -COLOR_YUV2BGRA_YUY2: int -COLOR_YUV2RGBA_YVYU: int -COLOR_YUV2BGRA_YVYU: int -COLOR_YUV2RGBA_YUYV: int -COLOR_YUV2BGRA_YUYV: int -COLOR_YUV2RGBA_YUNV: int -COLOR_YUV2BGRA_YUNV: int -COLOR_YUV2GRAY_UYVY: int -COLOR_YUV2GRAY_YUY2: int -COLOR_YUV2GRAY_Y422: int -COLOR_YUV2GRAY_UYNV: int -COLOR_YUV2GRAY_YVYU: int -COLOR_YUV2GRAY_YUYV: int -COLOR_YUV2GRAY_YUNV: int -COLOR_RGBA2mRGBA: int -COLOR_RGBA2M_RGBA: int -COLOR_mRGBA2RGBA: int -COLOR_M_RGBA2RGBA: int -COLOR_RGB2YUV_I420: int -COLOR_BGR2YUV_I420: int -COLOR_RGB2YUV_IYUV: int -COLOR_BGR2YUV_IYUV: int -COLOR_RGBA2YUV_I420: int -COLOR_BGRA2YUV_I420: int -COLOR_RGBA2YUV_IYUV: int -COLOR_BGRA2YUV_IYUV: int -COLOR_RGB2YUV_YV12: int -COLOR_BGR2YUV_YV12: int -COLOR_RGBA2YUV_YV12: int -COLOR_BGRA2YUV_YV12: int -COLOR_BayerBG2BGR: int -COLOR_BAYER_BG2BGR: int -COLOR_BayerGB2BGR: int -COLOR_BAYER_GB2BGR: int -COLOR_BayerRG2BGR: int -COLOR_BAYER_RG2BGR: int -COLOR_BayerGR2BGR: int -COLOR_BAYER_GR2BGR: int -COLOR_BayerRGGB2BGR: int -COLOR_BAYER_RGGB2BGR: int -COLOR_BayerGRBG2BGR: int -COLOR_BAYER_GRBG2BGR: int -COLOR_BayerBGGR2BGR: int -COLOR_BAYER_BGGR2BGR: int -COLOR_BayerGBRG2BGR: int -COLOR_BAYER_GBRG2BGR: int -COLOR_BayerRGGB2RGB: int -COLOR_BAYER_RGGB2RGB: int -COLOR_BayerGRBG2RGB: int -COLOR_BAYER_GRBG2RGB: int -COLOR_BayerBGGR2RGB: int -COLOR_BAYER_BGGR2RGB: int -COLOR_BayerGBRG2RGB: int -COLOR_BAYER_GBRG2RGB: int -COLOR_BayerBG2RGB: int -COLOR_BAYER_BG2RGB: int -COLOR_BayerGB2RGB: int -COLOR_BAYER_GB2RGB: int -COLOR_BayerRG2RGB: int -COLOR_BAYER_RG2RGB: int -COLOR_BayerGR2RGB: int -COLOR_BAYER_GR2RGB: int -COLOR_BayerBG2GRAY: int -COLOR_BAYER_BG2GRAY: int -COLOR_BayerGB2GRAY: int -COLOR_BAYER_GB2GRAY: int -COLOR_BayerRG2GRAY: int -COLOR_BAYER_RG2GRAY: int -COLOR_BayerGR2GRAY: int -COLOR_BAYER_GR2GRAY: int -COLOR_BayerRGGB2GRAY: int -COLOR_BAYER_RGGB2GRAY: int -COLOR_BayerGRBG2GRAY: int -COLOR_BAYER_GRBG2GRAY: int -COLOR_BayerBGGR2GRAY: int -COLOR_BAYER_BGGR2GRAY: int -COLOR_BayerGBRG2GRAY: int -COLOR_BAYER_GBRG2GRAY: int -COLOR_BayerBG2BGR_VNG: int -COLOR_BAYER_BG2BGR_VNG: int -COLOR_BayerGB2BGR_VNG: int -COLOR_BAYER_GB2BGR_VNG: int -COLOR_BayerRG2BGR_VNG: int -COLOR_BAYER_RG2BGR_VNG: int -COLOR_BayerGR2BGR_VNG: int -COLOR_BAYER_GR2BGR_VNG: int -COLOR_BayerRGGB2BGR_VNG: int -COLOR_BAYER_RGGB2BGR_VNG: int -COLOR_BayerGRBG2BGR_VNG: int -COLOR_BAYER_GRBG2BGR_VNG: int -COLOR_BayerBGGR2BGR_VNG: int -COLOR_BAYER_BGGR2BGR_VNG: int -COLOR_BayerGBRG2BGR_VNG: int -COLOR_BAYER_GBRG2BGR_VNG: int -COLOR_BayerRGGB2RGB_VNG: int -COLOR_BAYER_RGGB2RGB_VNG: int -COLOR_BayerGRBG2RGB_VNG: int -COLOR_BAYER_GRBG2RGB_VNG: int -COLOR_BayerBGGR2RGB_VNG: int -COLOR_BAYER_BGGR2RGB_VNG: int -COLOR_BayerGBRG2RGB_VNG: int -COLOR_BAYER_GBRG2RGB_VNG: int -COLOR_BayerBG2RGB_VNG: int -COLOR_BAYER_BG2RGB_VNG: int -COLOR_BayerGB2RGB_VNG: int -COLOR_BAYER_GB2RGB_VNG: int -COLOR_BayerRG2RGB_VNG: int -COLOR_BAYER_RG2RGB_VNG: int -COLOR_BayerGR2RGB_VNG: int 
-COLOR_BAYER_GR2RGB_VNG: int -COLOR_BayerBG2BGR_EA: int -COLOR_BAYER_BG2BGR_EA: int -COLOR_BayerGB2BGR_EA: int -COLOR_BAYER_GB2BGR_EA: int -COLOR_BayerRG2BGR_EA: int -COLOR_BAYER_RG2BGR_EA: int -COLOR_BayerGR2BGR_EA: int -COLOR_BAYER_GR2BGR_EA: int -COLOR_BayerRGGB2BGR_EA: int -COLOR_BAYER_RGGB2BGR_EA: int -COLOR_BayerGRBG2BGR_EA: int -COLOR_BAYER_GRBG2BGR_EA: int -COLOR_BayerBGGR2BGR_EA: int -COLOR_BAYER_BGGR2BGR_EA: int -COLOR_BayerGBRG2BGR_EA: int -COLOR_BAYER_GBRG2BGR_EA: int -COLOR_BayerRGGB2RGB_EA: int -COLOR_BAYER_RGGB2RGB_EA: int -COLOR_BayerGRBG2RGB_EA: int -COLOR_BAYER_GRBG2RGB_EA: int -COLOR_BayerBGGR2RGB_EA: int -COLOR_BAYER_BGGR2RGB_EA: int -COLOR_BayerGBRG2RGB_EA: int -COLOR_BAYER_GBRG2RGB_EA: int -COLOR_BayerBG2RGB_EA: int -COLOR_BAYER_BG2RGB_EA: int -COLOR_BayerGB2RGB_EA: int -COLOR_BAYER_GB2RGB_EA: int -COLOR_BayerRG2RGB_EA: int -COLOR_BAYER_RG2RGB_EA: int -COLOR_BayerGR2RGB_EA: int -COLOR_BAYER_GR2RGB_EA: int -COLOR_BayerBG2BGRA: int -COLOR_BAYER_BG2BGRA: int -COLOR_BayerGB2BGRA: int -COLOR_BAYER_GB2BGRA: int -COLOR_BayerRG2BGRA: int -COLOR_BAYER_RG2BGRA: int -COLOR_BayerGR2BGRA: int -COLOR_BAYER_GR2BGRA: int -COLOR_BayerRGGB2BGRA: int -COLOR_BAYER_RGGB2BGRA: int -COLOR_BayerGRBG2BGRA: int -COLOR_BAYER_GRBG2BGRA: int -COLOR_BayerBGGR2BGRA: int -COLOR_BAYER_BGGR2BGRA: int -COLOR_BayerGBRG2BGRA: int -COLOR_BAYER_GBRG2BGRA: int -COLOR_BayerRGGB2RGBA: int -COLOR_BAYER_RGGB2RGBA: int -COLOR_BayerGRBG2RGBA: int -COLOR_BAYER_GRBG2RGBA: int -COLOR_BayerBGGR2RGBA: int -COLOR_BAYER_BGGR2RGBA: int -COLOR_BayerGBRG2RGBA: int -COLOR_BAYER_GBRG2RGBA: int -COLOR_BayerBG2RGBA: int -COLOR_BAYER_BG2RGBA: int -COLOR_BayerGB2RGBA: int -COLOR_BAYER_GB2RGBA: int -COLOR_BayerRG2RGBA: int -COLOR_BAYER_RG2RGBA: int -COLOR_BayerGR2RGBA: int -COLOR_BAYER_GR2RGBA: int -COLOR_COLORCVT_MAX: int -ColorConversionCodes = int -"""One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, COLOR_RGB2BGRA, -COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, COLOR_BGR2GRAY, -COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, COLOR_RGBA2GRAY, -COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, COLOR_RGBA2BGR565, -COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, COLOR_RGB2BGR555, -COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, COLOR_BGR5552RGBA, -COLOR_GRAY2BGR555, COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, COLOR_BGR2YCrCb, -COLOR_BGR2YCR_CB, COLOR_RGB2YCrCb, COLOR_RGB2YCR_CB, COLOR_YCrCb2BGR, COLOR_YCR_CB2BGR, COLOR_YCrCb2RGB, -COLOR_YCR_CB2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, COLOR_BGR2LAB, COLOR_RGB2Lab, COLOR_RGB2LAB, -COLOR_BGR2Luv, COLOR_BGR2LUV, COLOR_RGB2Luv, COLOR_RGB2LUV, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, -COLOR_Lab2BGR, COLOR_LAB2BGR, COLOR_Lab2RGB, COLOR_LAB2RGB, COLOR_Luv2BGR, COLOR_LUV2BGR, COLOR_Luv2RGB, COLOR_LUV2RGB, -COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, -COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LBGR2LAB, -COLOR_LRGB2Lab, COLOR_LRGB2LAB, COLOR_LBGR2Luv, COLOR_LBGR2LUV, COLOR_LRGB2Luv, COLOR_LRGB2LUV, COLOR_Lab2LBGR, -COLOR_LAB2LBGR, COLOR_Lab2LRGB, COLOR_LAB2LRGB, COLOR_Luv2LBGR, COLOR_LUV2LBGR, COLOR_Luv2LRGB, COLOR_LUV2LRGB, 
-COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, -COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, COLOR_YUV420SP2RGB, COLOR_YUV420sp2BGR, COLOR_YUV420SP2BGR, COLOR_YUV2RGBA_NV12, -COLOR_YUV2BGRA_NV12, COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, COLOR_YUV420sp2RGBA, COLOR_YUV420SP2RGBA, -COLOR_YUV420sp2BGRA, COLOR_YUV420SP2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, -COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420P2RGB, COLOR_YUV420p2BGR, -COLOR_YUV420P2BGR, COLOR_YUV2RGBA_YV12, COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, -COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, COLOR_YUV420p2RGBA, COLOR_YUV420P2RGBA, COLOR_YUV420p2BGRA, -COLOR_YUV420P2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, COLOR_YUV2GRAY_YV12, -COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420SP2GRAY, COLOR_YUV420p2GRAY, -COLOR_YUV420P2GRAY, COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, -COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, -COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, -COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, COLOR_YUV2RGBA_YUY2, -COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, COLOR_YUV2BGRA_YUYV, -COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, COLOR_YUV2GRAY_YUY2, COLOR_YUV2GRAY_Y422, -COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, COLOR_RGBA2mRGBA, COLOR_RGBA2M_RGBA, -COLOR_mRGBA2RGBA, COLOR_M_RGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, -COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, -COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BAYER_BG2BGR, COLOR_BayerGB2BGR, -COLOR_BAYER_GB2BGR, COLOR_BayerRG2BGR, COLOR_BAYER_RG2BGR, COLOR_BayerGR2BGR, COLOR_BAYER_GR2BGR, COLOR_BayerRGGB2BGR, -COLOR_BAYER_RGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BAYER_GRBG2BGR, COLOR_BayerBGGR2BGR, COLOR_BAYER_BGGR2BGR, -COLOR_BayerGBRG2BGR, COLOR_BAYER_GBRG2BGR, COLOR_BayerRGGB2RGB, COLOR_BAYER_RGGB2RGB, COLOR_BayerGRBG2RGB, -COLOR_BAYER_GRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BAYER_BGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BAYER_GBRG2RGB, -COLOR_BayerBG2RGB, COLOR_BAYER_BG2RGB, COLOR_BayerGB2RGB, COLOR_BAYER_GB2RGB, COLOR_BayerRG2RGB, COLOR_BAYER_RG2RGB, -COLOR_BayerGR2RGB, COLOR_BAYER_GR2RGB, COLOR_BayerBG2GRAY, COLOR_BAYER_BG2GRAY, COLOR_BayerGB2GRAY, COLOR_BAYER_GB2GRAY, -COLOR_BayerRG2GRAY, COLOR_BAYER_RG2GRAY, COLOR_BayerGR2GRAY, COLOR_BAYER_GR2GRAY, COLOR_BayerRGGB2GRAY, -COLOR_BAYER_RGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BAYER_GRBG2GRAY, COLOR_BayerBGGR2GRAY, COLOR_BAYER_BGGR2GRAY, -COLOR_BayerGBRG2GRAY, COLOR_BAYER_GBRG2GRAY, COLOR_BayerBG2BGR_VNG, COLOR_BAYER_BG2BGR_VNG, COLOR_BayerGB2BGR_VNG, -COLOR_BAYER_GB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BAYER_RG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BAYER_GR2BGR_VNG, -COLOR_BayerRGGB2BGR_VNG, COLOR_BAYER_RGGB2BGR_VNG, COLOR_BayerGRBG2BGR_VNG, COLOR_BAYER_GRBG2BGR_VNG, -COLOR_BayerBGGR2BGR_VNG, COLOR_BAYER_BGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BAYER_GBRG2BGR_VNG, -COLOR_BayerRGGB2RGB_VNG, COLOR_BAYER_RGGB2RGB_VNG, COLOR_BayerGRBG2RGB_VNG, 
COLOR_BAYER_GRBG2RGB_VNG, -COLOR_BayerBGGR2RGB_VNG, COLOR_BAYER_BGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BAYER_GBRG2RGB_VNG, -COLOR_BayerBG2RGB_VNG, COLOR_BAYER_BG2RGB_VNG, COLOR_BayerGB2RGB_VNG, COLOR_BAYER_GB2RGB_VNG, COLOR_BayerRG2RGB_VNG, -COLOR_BAYER_RG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BAYER_GR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BAYER_BG2BGR_EA, -COLOR_BayerGB2BGR_EA, COLOR_BAYER_GB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BAYER_RG2BGR_EA, COLOR_BayerGR2BGR_EA, -COLOR_BAYER_GR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BAYER_RGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BAYER_GRBG2BGR_EA, -COLOR_BayerBGGR2BGR_EA, COLOR_BAYER_BGGR2BGR_EA, COLOR_BayerGBRG2BGR_EA, COLOR_BAYER_GBRG2BGR_EA, -COLOR_BayerRGGB2RGB_EA, COLOR_BAYER_RGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BAYER_GRBG2RGB_EA, -COLOR_BayerBGGR2RGB_EA, COLOR_BAYER_BGGR2RGB_EA, COLOR_BayerGBRG2RGB_EA, COLOR_BAYER_GBRG2RGB_EA, COLOR_BayerBG2RGB_EA, -COLOR_BAYER_BG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BAYER_GB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BAYER_RG2RGB_EA, -COLOR_BayerGR2RGB_EA, COLOR_BAYER_GR2RGB_EA, COLOR_BayerBG2BGRA, COLOR_BAYER_BG2BGRA, COLOR_BayerGB2BGRA, -COLOR_BAYER_GB2BGRA, COLOR_BayerRG2BGRA, COLOR_BAYER_RG2BGRA, COLOR_BayerGR2BGRA, COLOR_BAYER_GR2BGRA, -COLOR_BayerRGGB2BGRA, COLOR_BAYER_RGGB2BGRA, COLOR_BayerGRBG2BGRA, COLOR_BAYER_GRBG2BGRA, COLOR_BayerBGGR2BGRA, -COLOR_BAYER_BGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BAYER_GBRG2BGRA, COLOR_BayerRGGB2RGBA, COLOR_BAYER_RGGB2RGBA, -COLOR_BayerGRBG2RGBA, COLOR_BAYER_GRBG2RGBA, COLOR_BayerBGGR2RGBA, COLOR_BAYER_BGGR2RGBA, COLOR_BayerGBRG2RGBA, -COLOR_BAYER_GBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BAYER_BG2RGBA, COLOR_BayerGB2RGBA, COLOR_BAYER_GB2RGBA, -COLOR_BayerRG2RGBA, COLOR_BAYER_RG2RGBA, COLOR_BayerGR2RGBA, COLOR_BAYER_GR2RGBA, COLOR_COLORCVT_MAX]""" - -INTERSECT_NONE: int -INTERSECT_PARTIAL: int -INTERSECT_FULL: int -RectanglesIntersectTypes = int -"""One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL]""" - -FILLED: int -LINE_4: int -LINE_8: int -LINE_AA: int -LineTypes = int -"""One of [FILLED, LINE_4, LINE_8, LINE_AA]""" - -FONT_HERSHEY_SIMPLEX: int -FONT_HERSHEY_PLAIN: int -FONT_HERSHEY_DUPLEX: int -FONT_HERSHEY_COMPLEX: int -FONT_HERSHEY_TRIPLEX: int -FONT_HERSHEY_COMPLEX_SMALL: int -FONT_HERSHEY_SCRIPT_SIMPLEX: int -FONT_HERSHEY_SCRIPT_COMPLEX: int -FONT_ITALIC: int -HersheyFonts = int -"""One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, -FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC]""" - -MARKER_CROSS: int -MARKER_TILTED_CROSS: int -MARKER_STAR: int -MARKER_DIAMOND: int -MARKER_SQUARE: int -MARKER_TRIANGLE_UP: int -MARKER_TRIANGLE_DOWN: int -MarkerTypes = int -"""One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, MARKER_SQUARE, MARKER_TRIANGLE_UP, -MARKER_TRIANGLE_DOWN]""" - -TM_SQDIFF: int -TM_SQDIFF_NORMED: int -TM_CCORR: int -TM_CCORR_NORMED: int -TM_CCOEFF: int -TM_CCOEFF_NORMED: int -TemplateMatchModes = int -"""One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED]""" - -COLORMAP_AUTUMN: int -COLORMAP_BONE: int -COLORMAP_JET: int -COLORMAP_WINTER: int -COLORMAP_RAINBOW: int -COLORMAP_OCEAN: int -COLORMAP_SUMMER: int -COLORMAP_SPRING: int -COLORMAP_COOL: int -COLORMAP_HSV: int -COLORMAP_PINK: int -COLORMAP_HOT: int -COLORMAP_PARULA: int -COLORMAP_MAGMA: int -COLORMAP_INFERNO: int -COLORMAP_PLASMA: int -COLORMAP_VIRIDIS: int -COLORMAP_CIVIDIS: int -COLORMAP_TWILIGHT: int 
-COLORMAP_TWILIGHT_SHIFTED: int -COLORMAP_TURBO: int -COLORMAP_DEEPGREEN: int -ColormapTypes = int -"""One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, COLORMAP_OCEAN, -COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, COLORMAP_PARULA, -COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, COLORMAP_TWILIGHT, -COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN]""" - -INPAINT_NS: int -INPAINT_TELEA: int -LDR_SIZE: int -NORMAL_CLONE: int -MIXED_CLONE: int -MONOCHROME_TRANSFER: int -RECURS_FILTER: int -NORMCONV_FILTER: int -CAP_PROP_DC1394_OFF: int -CAP_PROP_DC1394_MODE_MANUAL: int -CAP_PROP_DC1394_MODE_AUTO: int -CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO: int -CAP_PROP_DC1394_MAX: int -CAP_OPENNI_DEPTH_GENERATOR: int -CAP_OPENNI_IMAGE_GENERATOR: int -CAP_OPENNI_IR_GENERATOR: int -CAP_OPENNI_GENERATORS_MASK: int -CAP_PROP_OPENNI_OUTPUT_MODE: int -CAP_PROP_OPENNI_FRAME_MAX_DEPTH: int -CAP_PROP_OPENNI_BASELINE: int -CAP_PROP_OPENNI_FOCAL_LENGTH: int -CAP_PROP_OPENNI_REGISTRATION: int -CAP_PROP_OPENNI_REGISTRATION_ON: int -CAP_PROP_OPENNI_APPROX_FRAME_SYNC: int -CAP_PROP_OPENNI_MAX_BUFFER_SIZE: int -CAP_PROP_OPENNI_CIRCLE_BUFFER: int -CAP_PROP_OPENNI_MAX_TIME_DURATION: int -CAP_PROP_OPENNI_GENERATOR_PRESENT: int -CAP_PROP_OPENNI2_SYNC: int -CAP_PROP_OPENNI2_MIRROR: int -CAP_OPENNI_IMAGE_GENERATOR_PRESENT: int -CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE: int -CAP_OPENNI_DEPTH_GENERATOR_PRESENT: int -CAP_OPENNI_DEPTH_GENERATOR_BASELINE: int -CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH: int -CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION: int -CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON: int -CAP_OPENNI_IR_GENERATOR_PRESENT: int -CAP_OPENNI_DEPTH_MAP: int -CAP_OPENNI_POINT_CLOUD_MAP: int -CAP_OPENNI_DISPARITY_MAP: int -CAP_OPENNI_DISPARITY_MAP_32F: int -CAP_OPENNI_VALID_DEPTH_MASK: int -CAP_OPENNI_BGR_IMAGE: int -CAP_OPENNI_GRAY_IMAGE: int -CAP_OPENNI_IR_IMAGE: int -CAP_OPENNI_VGA_30HZ: int -CAP_OPENNI_SXGA_15HZ: int -CAP_OPENNI_SXGA_30HZ: int -CAP_OPENNI_QVGA_30HZ: int -CAP_OPENNI_QVGA_60HZ: int -CAP_PROP_GSTREAMER_QUEUE_LENGTH: int -CAP_PROP_PVAPI_MULTICASTIP: int -CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE: int -CAP_PROP_PVAPI_DECIMATIONHORIZONTAL: int -CAP_PROP_PVAPI_DECIMATIONVERTICAL: int -CAP_PROP_PVAPI_BINNINGX: int -CAP_PROP_PVAPI_BINNINGY: int -CAP_PROP_PVAPI_PIXELFORMAT: int -CAP_PVAPI_FSTRIGMODE_FREERUN: int -CAP_PVAPI_FSTRIGMODE_SYNCIN1: int -CAP_PVAPI_FSTRIGMODE_SYNCIN2: int -CAP_PVAPI_FSTRIGMODE_FIXEDRATE: int -CAP_PVAPI_FSTRIGMODE_SOFTWARE: int -CAP_PVAPI_DECIMATION_OFF: int -CAP_PVAPI_DECIMATION_2OUTOF4: int -CAP_PVAPI_DECIMATION_2OUTOF8: int -CAP_PVAPI_DECIMATION_2OUTOF16: int -CAP_PVAPI_PIXELFORMAT_MONO8: int -CAP_PVAPI_PIXELFORMAT_MONO16: int -CAP_PVAPI_PIXELFORMAT_BAYER8: int -CAP_PVAPI_PIXELFORMAT_BAYER16: int -CAP_PVAPI_PIXELFORMAT_RGB24: int -CAP_PVAPI_PIXELFORMAT_BGR24: int -CAP_PVAPI_PIXELFORMAT_RGBA32: int -CAP_PVAPI_PIXELFORMAT_BGRA32: int -CAP_PROP_XI_DOWNSAMPLING: int -CAP_PROP_XI_DATA_FORMAT: int -CAP_PROP_XI_OFFSET_X: int -CAP_PROP_XI_OFFSET_Y: int -CAP_PROP_XI_TRG_SOURCE: int -CAP_PROP_XI_TRG_SOFTWARE: int -CAP_PROP_XI_GPI_SELECTOR: int -CAP_PROP_XI_GPI_MODE: int -CAP_PROP_XI_GPI_LEVEL: int -CAP_PROP_XI_GPO_SELECTOR: int -CAP_PROP_XI_GPO_MODE: int -CAP_PROP_XI_LED_SELECTOR: int -CAP_PROP_XI_LED_MODE: int -CAP_PROP_XI_MANUAL_WB: int -CAP_PROP_XI_AUTO_WB: int -CAP_PROP_XI_AEAG: int -CAP_PROP_XI_EXP_PRIORITY: int -CAP_PROP_XI_AE_MAX_LIMIT: int -CAP_PROP_XI_AG_MAX_LIMIT: int 
-CAP_PROP_XI_AEAG_LEVEL: int -CAP_PROP_XI_TIMEOUT: int -CAP_PROP_XI_EXPOSURE: int -CAP_PROP_XI_EXPOSURE_BURST_COUNT: int -CAP_PROP_XI_GAIN_SELECTOR: int -CAP_PROP_XI_GAIN: int -CAP_PROP_XI_DOWNSAMPLING_TYPE: int -CAP_PROP_XI_BINNING_SELECTOR: int -CAP_PROP_XI_BINNING_VERTICAL: int -CAP_PROP_XI_BINNING_HORIZONTAL: int -CAP_PROP_XI_BINNING_PATTERN: int -CAP_PROP_XI_DECIMATION_SELECTOR: int -CAP_PROP_XI_DECIMATION_VERTICAL: int -CAP_PROP_XI_DECIMATION_HORIZONTAL: int -CAP_PROP_XI_DECIMATION_PATTERN: int -CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR: int -CAP_PROP_XI_TEST_PATTERN: int -CAP_PROP_XI_IMAGE_DATA_FORMAT: int -CAP_PROP_XI_SHUTTER_TYPE: int -CAP_PROP_XI_SENSOR_TAPS: int -CAP_PROP_XI_AEAG_ROI_OFFSET_X: int -CAP_PROP_XI_AEAG_ROI_OFFSET_Y: int -CAP_PROP_XI_AEAG_ROI_WIDTH: int -CAP_PROP_XI_AEAG_ROI_HEIGHT: int -CAP_PROP_XI_BPC: int -CAP_PROP_XI_WB_KR: int -CAP_PROP_XI_WB_KG: int -CAP_PROP_XI_WB_KB: int -CAP_PROP_XI_WIDTH: int -CAP_PROP_XI_HEIGHT: int -CAP_PROP_XI_REGION_SELECTOR: int -CAP_PROP_XI_REGION_MODE: int -CAP_PROP_XI_LIMIT_BANDWIDTH: int -CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH: int -CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH: int -CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH: int -CAP_PROP_XI_OUTPUT_DATA_PACKING: int -CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE: int -CAP_PROP_XI_IS_COOLED: int -CAP_PROP_XI_COOLING: int -CAP_PROP_XI_TARGET_TEMP: int -CAP_PROP_XI_CHIP_TEMP: int -CAP_PROP_XI_HOUS_TEMP: int -CAP_PROP_XI_HOUS_BACK_SIDE_TEMP: int -CAP_PROP_XI_SENSOR_BOARD_TEMP: int -CAP_PROP_XI_CMS: int -CAP_PROP_XI_APPLY_CMS: int -CAP_PROP_XI_IMAGE_IS_COLOR: int -CAP_PROP_XI_COLOR_FILTER_ARRAY: int -CAP_PROP_XI_GAMMAY: int -CAP_PROP_XI_GAMMAC: int -CAP_PROP_XI_SHARPNESS: int -CAP_PROP_XI_CC_MATRIX_00: int -CAP_PROP_XI_CC_MATRIX_01: int -CAP_PROP_XI_CC_MATRIX_02: int -CAP_PROP_XI_CC_MATRIX_03: int -CAP_PROP_XI_CC_MATRIX_10: int -CAP_PROP_XI_CC_MATRIX_11: int -CAP_PROP_XI_CC_MATRIX_12: int -CAP_PROP_XI_CC_MATRIX_13: int -CAP_PROP_XI_CC_MATRIX_20: int -CAP_PROP_XI_CC_MATRIX_21: int -CAP_PROP_XI_CC_MATRIX_22: int -CAP_PROP_XI_CC_MATRIX_23: int -CAP_PROP_XI_CC_MATRIX_30: int -CAP_PROP_XI_CC_MATRIX_31: int -CAP_PROP_XI_CC_MATRIX_32: int -CAP_PROP_XI_CC_MATRIX_33: int -CAP_PROP_XI_DEFAULT_CC_MATRIX: int -CAP_PROP_XI_TRG_SELECTOR: int -CAP_PROP_XI_ACQ_FRAME_BURST_COUNT: int -CAP_PROP_XI_DEBOUNCE_EN: int -CAP_PROP_XI_DEBOUNCE_T0: int -CAP_PROP_XI_DEBOUNCE_T1: int -CAP_PROP_XI_DEBOUNCE_POL: int -CAP_PROP_XI_LENS_MODE: int -CAP_PROP_XI_LENS_APERTURE_VALUE: int -CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE: int -CAP_PROP_XI_LENS_FOCUS_MOVE: int -CAP_PROP_XI_LENS_FOCUS_DISTANCE: int -CAP_PROP_XI_LENS_FOCAL_LENGTH: int -CAP_PROP_XI_LENS_FEATURE_SELECTOR: int -CAP_PROP_XI_LENS_FEATURE: int -CAP_PROP_XI_DEVICE_MODEL_ID: int -CAP_PROP_XI_DEVICE_SN: int -CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA: int -CAP_PROP_XI_IMAGE_PAYLOAD_SIZE: int -CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT: int -CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ: int -CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX: int -CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT: int -CAP_PROP_XI_FRAMERATE: int -CAP_PROP_XI_COUNTER_SELECTOR: int -CAP_PROP_XI_COUNTER_VALUE: int -CAP_PROP_XI_ACQ_TIMING_MODE: int -CAP_PROP_XI_AVAILABLE_BANDWIDTH: int -CAP_PROP_XI_BUFFER_POLICY: int -CAP_PROP_XI_LUT_EN: int -CAP_PROP_XI_LUT_INDEX: int -CAP_PROP_XI_LUT_VALUE: int -CAP_PROP_XI_TRG_DELAY: int -CAP_PROP_XI_TS_RST_MODE: int -CAP_PROP_XI_TS_RST_SOURCE: int -CAP_PROP_XI_IS_DEVICE_EXIST: int -CAP_PROP_XI_ACQ_BUFFER_SIZE: int -CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT: int -CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE: int 
-CAP_PROP_XI_BUFFERS_QUEUE_SIZE: int -CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT: int -CAP_PROP_XI_RECENT_FRAME: int -CAP_PROP_XI_DEVICE_RESET: int -CAP_PROP_XI_COLUMN_FPN_CORRECTION: int -CAP_PROP_XI_ROW_FPN_CORRECTION: int -CAP_PROP_XI_SENSOR_MODE: int -CAP_PROP_XI_HDR: int -CAP_PROP_XI_HDR_KNEEPOINT_COUNT: int -CAP_PROP_XI_HDR_T1: int -CAP_PROP_XI_HDR_T2: int -CAP_PROP_XI_KNEEPOINT1: int -CAP_PROP_XI_KNEEPOINT2: int -CAP_PROP_XI_IMAGE_BLACK_LEVEL: int -CAP_PROP_XI_HW_REVISION: int -CAP_PROP_XI_DEBUG_LEVEL: int -CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION: int -CAP_PROP_XI_FFS_FILE_ID: int -CAP_PROP_XI_FFS_FILE_SIZE: int -CAP_PROP_XI_FREE_FFS_SIZE: int -CAP_PROP_XI_USED_FFS_SIZE: int -CAP_PROP_XI_FFS_ACCESS_KEY: int -CAP_PROP_XI_SENSOR_FEATURE_SELECTOR: int -CAP_PROP_XI_SENSOR_FEATURE_VALUE: int -CAP_PROP_ARAVIS_AUTOTRIGGER: int -CAP_PROP_IOS_DEVICE_FOCUS: int -CAP_PROP_IOS_DEVICE_EXPOSURE: int -CAP_PROP_IOS_DEVICE_FLASH: int -CAP_PROP_IOS_DEVICE_WHITEBALANCE: int -CAP_PROP_IOS_DEVICE_TORCH: int -CAP_PROP_GIGA_FRAME_OFFSET_X: int -CAP_PROP_GIGA_FRAME_OFFSET_Y: int -CAP_PROP_GIGA_FRAME_WIDTH_MAX: int -CAP_PROP_GIGA_FRAME_HEIGH_MAX: int -CAP_PROP_GIGA_FRAME_SENS_WIDTH: int -CAP_PROP_GIGA_FRAME_SENS_HEIGH: int -CAP_PROP_INTELPERC_PROFILE_COUNT: int -CAP_PROP_INTELPERC_PROFILE_IDX: int -CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE: int -CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE: int -CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD: int -CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ: int -CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT: int -CAP_INTELPERC_DEPTH_GENERATOR: int -CAP_INTELPERC_IMAGE_GENERATOR: int -CAP_INTELPERC_IR_GENERATOR: int -CAP_INTELPERC_GENERATORS_MASK: int -CAP_INTELPERC_DEPTH_MAP: int -CAP_INTELPERC_UVDEPTH_MAP: int -CAP_INTELPERC_IR_MAP: int -CAP_INTELPERC_IMAGE: int -CAP_PROP_GPHOTO2_PREVIEW: int -CAP_PROP_GPHOTO2_WIDGET_ENUMERATE: int -CAP_PROP_GPHOTO2_RELOAD_CONFIG: int -CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE: int -CAP_PROP_GPHOTO2_COLLECT_MSGS: int -CAP_PROP_GPHOTO2_FLUSH_MSGS: int -CAP_PROP_SPEED: int -CAP_PROP_APERTURE: int -CAP_PROP_EXPOSUREPROGRAM: int -CAP_PROP_VIEWFINDER: int -CAP_PROP_IMAGES_BASE: int -CAP_PROP_IMAGES_LAST: int -LMEDS: int -RANSAC: int -RHO: int -USAC_DEFAULT: int -USAC_PARALLEL: int -USAC_FM_8PTS: int -USAC_FAST: int -USAC_ACCURATE: int -USAC_PROSAC: int -USAC_MAGSAC: int -CALIB_CB_ADAPTIVE_THRESH: int -CALIB_CB_NORMALIZE_IMAGE: int -CALIB_CB_FILTER_QUADS: int -CALIB_CB_FAST_CHECK: int -CALIB_CB_EXHAUSTIVE: int -CALIB_CB_ACCURACY: int -CALIB_CB_LARGER: int -CALIB_CB_MARKER: int -CALIB_CB_SYMMETRIC_GRID: int -CALIB_CB_ASYMMETRIC_GRID: int -CALIB_CB_CLUSTERING: int -CALIB_NINTRINSIC: int -CALIB_USE_INTRINSIC_GUESS: int -CALIB_FIX_ASPECT_RATIO: int -CALIB_FIX_PRINCIPAL_POINT: int -CALIB_ZERO_TANGENT_DIST: int -CALIB_FIX_FOCAL_LENGTH: int -CALIB_FIX_K1: int -CALIB_FIX_K2: int -CALIB_FIX_K3: int -CALIB_FIX_K4: int -CALIB_FIX_K5: int -CALIB_FIX_K6: int -CALIB_RATIONAL_MODEL: int -CALIB_THIN_PRISM_MODEL: int -CALIB_FIX_S1_S2_S3_S4: int -CALIB_TILTED_MODEL: int -CALIB_FIX_TAUX_TAUY: int -CALIB_USE_QR: int -CALIB_FIX_TANGENT_DIST: int -CALIB_FIX_INTRINSIC: int -CALIB_SAME_FOCAL_LENGTH: int -CALIB_ZERO_DISPARITY: int -CALIB_USE_LU: int -CALIB_USE_EXTRINSIC_GUESS: int -FM_7POINT: int -FM_8POINT: int -FM_LMEDS: int -FM_RANSAC: int -CASCADE_DO_CANNY_PRUNING: int -CASCADE_SCALE_IMAGE: int -CASCADE_FIND_BIGGEST_OBJECT: int -CASCADE_DO_ROUGH_SEARCH: int -OPTFLOW_USE_INITIAL_FLOW: int -OPTFLOW_LK_GET_MIN_EIGENVALS: int -OPTFLOW_FARNEBACK_GAUSSIAN: int -MOTION_TRANSLATION: int 
-MOTION_EUCLIDEAN: int -MOTION_AFFINE: int -MOTION_HOMOGRAPHY: int - -DrawMatchesFlags_DEFAULT: int -DRAW_MATCHES_FLAGS_DEFAULT: int -DrawMatchesFlags_DRAW_OVER_OUTIMG: int -DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG: int -DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int -DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS: int -DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int -DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS: int -DrawMatchesFlags = int -"""One of [DrawMatchesFlags_DEFAULT, DRAW_MATCHES_FLAGS_DEFAULT, DrawMatchesFlags_DRAW_OVER_OUTIMG, -DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS, DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS, -DrawMatchesFlags_DRAW_RICH_KEYPOINTS, DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS]""" - -IMREAD_UNCHANGED: int -IMREAD_GRAYSCALE: int -IMREAD_COLOR: int -IMREAD_ANYDEPTH: int -IMREAD_ANYCOLOR: int -IMREAD_LOAD_GDAL: int -IMREAD_REDUCED_GRAYSCALE_2: int -IMREAD_REDUCED_COLOR_2: int -IMREAD_REDUCED_GRAYSCALE_4: int -IMREAD_REDUCED_COLOR_4: int -IMREAD_REDUCED_GRAYSCALE_8: int -IMREAD_REDUCED_COLOR_8: int -IMREAD_IGNORE_ORIENTATION: int -ImreadModes = int -"""One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, IMREAD_LOAD_GDAL, -IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, IMREAD_REDUCED_COLOR_4, -IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION]""" - -IMWRITE_JPEG_QUALITY: int -IMWRITE_JPEG_PROGRESSIVE: int -IMWRITE_JPEG_OPTIMIZE: int -IMWRITE_JPEG_RST_INTERVAL: int -IMWRITE_JPEG_LUMA_QUALITY: int -IMWRITE_JPEG_CHROMA_QUALITY: int -IMWRITE_JPEG_SAMPLING_FACTOR: int -IMWRITE_PNG_COMPRESSION: int -IMWRITE_PNG_STRATEGY: int -IMWRITE_PNG_BILEVEL: int -IMWRITE_PXM_BINARY: int -IMWRITE_EXR_TYPE: int -IMWRITE_EXR_COMPRESSION: int -IMWRITE_EXR_DWA_COMPRESSION_LEVEL: int -IMWRITE_WEBP_QUALITY: int -IMWRITE_HDR_COMPRESSION: int -IMWRITE_PAM_TUPLETYPE: int -IMWRITE_TIFF_RESUNIT: int -IMWRITE_TIFF_XDPI: int -IMWRITE_TIFF_YDPI: int -IMWRITE_TIFF_COMPRESSION: int -IMWRITE_JPEG2000_COMPRESSION_X1000: int -IMWRITE_AVIF_QUALITY: int -IMWRITE_AVIF_DEPTH: int -IMWRITE_AVIF_SPEED: int -ImwriteFlags = int -"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, -IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, -IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, -IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, -IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, -IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED]""" - -IMWRITE_JPEG_SAMPLING_FACTOR_411: int -IMWRITE_JPEG_SAMPLING_FACTOR_420: int -IMWRITE_JPEG_SAMPLING_FACTOR_422: int -IMWRITE_JPEG_SAMPLING_FACTOR_440: int -IMWRITE_JPEG_SAMPLING_FACTOR_444: int -ImwriteJPEGSamplingFactorParams = int -"""One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, -IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444]""" - -IMWRITE_EXR_TYPE_HALF: int -IMWRITE_EXR_TYPE_FLOAT: int -ImwriteEXRTypeFlags = int -"""One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT]""" - -IMWRITE_EXR_COMPRESSION_NO: int -IMWRITE_EXR_COMPRESSION_RLE: int -IMWRITE_EXR_COMPRESSION_ZIPS: int -IMWRITE_EXR_COMPRESSION_ZIP: int -IMWRITE_EXR_COMPRESSION_PIZ: int -IMWRITE_EXR_COMPRESSION_PXR24: int 
-IMWRITE_EXR_COMPRESSION_B44: int -IMWRITE_EXR_COMPRESSION_B44A: int -IMWRITE_EXR_COMPRESSION_DWAA: int -IMWRITE_EXR_COMPRESSION_DWAB: int -ImwriteEXRCompressionFlags = int -"""One of [IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, IMWRITE_EXR_COMPRESSION_ZIPS, -IMWRITE_EXR_COMPRESSION_ZIP, IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, IMWRITE_EXR_COMPRESSION_B44, -IMWRITE_EXR_COMPRESSION_B44A, IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB]""" - -IMWRITE_PNG_STRATEGY_DEFAULT: int -IMWRITE_PNG_STRATEGY_FILTERED: int -IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int -IMWRITE_PNG_STRATEGY_RLE: int -IMWRITE_PNG_STRATEGY_FIXED: int -ImwritePNGFlags = int -"""One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, -IMWRITE_PNG_STRATEGY_RLE, IMWRITE_PNG_STRATEGY_FIXED]""" - -IMWRITE_PAM_FORMAT_NULL: int -IMWRITE_PAM_FORMAT_BLACKANDWHITE: int -IMWRITE_PAM_FORMAT_GRAYSCALE: int -IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int -IMWRITE_PAM_FORMAT_RGB: int -IMWRITE_PAM_FORMAT_RGB_ALPHA: int -ImwritePAMFlags = int -"""One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, IMWRITE_PAM_FORMAT_GRAYSCALE, -IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA]""" - -IMWRITE_HDR_COMPRESSION_NONE: int -IMWRITE_HDR_COMPRESSION_RLE: int -ImwriteHDRCompressionFlags = int -"""One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE]""" - -CAP_ANY: int -CAP_VFW: int -CAP_V4L: int -CAP_V4L2: int -CAP_FIREWIRE: int -CAP_FIREWARE: int -CAP_IEEE1394: int -CAP_DC1394: int -CAP_CMU1394: int -CAP_QT: int -CAP_UNICAP: int -CAP_DSHOW: int -CAP_PVAPI: int -CAP_OPENNI: int -CAP_OPENNI_ASUS: int -CAP_ANDROID: int -CAP_XIAPI: int -CAP_AVFOUNDATION: int -CAP_GIGANETIX: int -CAP_MSMF: int -CAP_WINRT: int -CAP_INTELPERC: int -CAP_REALSENSE: int -CAP_OPENNI2: int -CAP_OPENNI2_ASUS: int -CAP_OPENNI2_ASTRA: int -CAP_GPHOTO2: int -CAP_GSTREAMER: int -CAP_FFMPEG: int -CAP_IMAGES: int -CAP_ARAVIS: int -CAP_OPENCV_MJPEG: int -CAP_INTEL_MFX: int -CAP_XINE: int -CAP_UEYE: int -CAP_OBSENSOR: int -VideoCaptureAPIs = int -"""One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, CAP_DC1394, CAP_CMU1394, -CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, CAP_XIAPI, CAP_AVFOUNDATION, -CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, -CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, -CAP_OBSENSOR]""" - -CAP_PROP_POS_MSEC: int -CAP_PROP_POS_FRAMES: int -CAP_PROP_POS_AVI_RATIO: int -CAP_PROP_FRAME_WIDTH: int -CAP_PROP_FRAME_HEIGHT: int -CAP_PROP_FPS: int -CAP_PROP_FOURCC: int -CAP_PROP_FRAME_COUNT: int -CAP_PROP_FORMAT: int -CAP_PROP_MODE: int -CAP_PROP_BRIGHTNESS: int -CAP_PROP_CONTRAST: int -CAP_PROP_SATURATION: int -CAP_PROP_HUE: int -CAP_PROP_GAIN: int -CAP_PROP_EXPOSURE: int -CAP_PROP_CONVERT_RGB: int -CAP_PROP_WHITE_BALANCE_BLUE_U: int -CAP_PROP_RECTIFICATION: int -CAP_PROP_MONOCHROME: int -CAP_PROP_SHARPNESS: int -CAP_PROP_AUTO_EXPOSURE: int -CAP_PROP_GAMMA: int -CAP_PROP_TEMPERATURE: int -CAP_PROP_TRIGGER: int -CAP_PROP_TRIGGER_DELAY: int -CAP_PROP_WHITE_BALANCE_RED_V: int -CAP_PROP_ZOOM: int -CAP_PROP_FOCUS: int -CAP_PROP_GUID: int -CAP_PROP_ISO_SPEED: int -CAP_PROP_BACKLIGHT: int -CAP_PROP_PAN: int -CAP_PROP_TILT: int -CAP_PROP_ROLL: int -CAP_PROP_IRIS: int -CAP_PROP_SETTINGS: int -CAP_PROP_BUFFERSIZE: int 
-CAP_PROP_AUTOFOCUS: int -CAP_PROP_SAR_NUM: int -CAP_PROP_SAR_DEN: int -CAP_PROP_BACKEND: int -CAP_PROP_CHANNEL: int -CAP_PROP_AUTO_WB: int -CAP_PROP_WB_TEMPERATURE: int -CAP_PROP_CODEC_PIXEL_FORMAT: int -CAP_PROP_BITRATE: int -CAP_PROP_ORIENTATION_META: int -CAP_PROP_ORIENTATION_AUTO: int -CAP_PROP_HW_ACCELERATION: int -CAP_PROP_HW_DEVICE: int -CAP_PROP_HW_ACCELERATION_USE_OPENCL: int -CAP_PROP_OPEN_TIMEOUT_MSEC: int -CAP_PROP_READ_TIMEOUT_MSEC: int -CAP_PROP_STREAM_OPEN_TIME_USEC: int -CAP_PROP_VIDEO_TOTAL_CHANNELS: int -CAP_PROP_VIDEO_STREAM: int -CAP_PROP_AUDIO_STREAM: int -CAP_PROP_AUDIO_POS: int -CAP_PROP_AUDIO_SHIFT_NSEC: int -CAP_PROP_AUDIO_DATA_DEPTH: int -CAP_PROP_AUDIO_SAMPLES_PER_SECOND: int -CAP_PROP_AUDIO_BASE_INDEX: int -CAP_PROP_AUDIO_TOTAL_CHANNELS: int -CAP_PROP_AUDIO_TOTAL_STREAMS: int -CAP_PROP_AUDIO_SYNCHRONIZE: int -CAP_PROP_LRF_HAS_KEY_FRAME: int -CAP_PROP_CODEC_EXTRADATA_INDEX: int -CAP_PROP_FRAME_TYPE: int -CAP_PROP_N_THREADS: int -VideoCaptureProperties = int -"""One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, -CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, -CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, -CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, -CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, -CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, -CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, -CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, -CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, CAP_PROP_ORIENTATION_AUTO, -CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, CAP_PROP_OPEN_TIMEOUT_MSEC, -CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, CAP_PROP_VIDEO_STREAM, -CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, CAP_PROP_AUDIO_DATA_DEPTH, -CAP_PROP_AUDIO_SAMPLES_PER_SECOND, CAP_PROP_AUDIO_BASE_INDEX, CAP_PROP_AUDIO_TOTAL_CHANNELS, -CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, CAP_PROP_CODEC_EXTRADATA_INDEX, -CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS]""" - -VIDEOWRITER_PROP_QUALITY: int -VIDEOWRITER_PROP_FRAMEBYTES: int -VIDEOWRITER_PROP_NSTRIPES: int -VIDEOWRITER_PROP_IS_COLOR: int -VIDEOWRITER_PROP_DEPTH: int -VIDEOWRITER_PROP_HW_ACCELERATION: int -VIDEOWRITER_PROP_HW_DEVICE: int -VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int -VideoWriterProperties = int -"""One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, -VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, VIDEOWRITER_PROP_HW_DEVICE, -VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL]""" - -VIDEO_ACCELERATION_NONE: int -VIDEO_ACCELERATION_ANY: int -VIDEO_ACCELERATION_D3D11: int -VIDEO_ACCELERATION_VAAPI: int -VIDEO_ACCELERATION_MFX: int -VideoAccelerationType = int -"""One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, -VIDEO_ACCELERATION_MFX]""" - -CAP_OBSENSOR_DEPTH_MAP: int -CAP_OBSENSOR_BGR_IMAGE: int -CAP_OBSENSOR_IR_IMAGE: 
int -VideoCaptureOBSensorDataType = int -"""One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE]""" - -CAP_OBSENSOR_DEPTH_GENERATOR: int -CAP_OBSENSOR_IMAGE_GENERATOR: int -CAP_OBSENSOR_IR_GENERATOR: int -CAP_OBSENSOR_GENERATORS_MASK: int -VideoCaptureOBSensorGenerators = int -"""One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, CAP_OBSENSOR_IR_GENERATOR, -CAP_OBSENSOR_GENERATORS_MASK]""" - -CAP_PROP_OBSENSOR_INTRINSIC_FX: int -CAP_PROP_OBSENSOR_INTRINSIC_FY: int -CAP_PROP_OBSENSOR_INTRINSIC_CX: int -CAP_PROP_OBSENSOR_INTRINSIC_CY: int -VideoCaptureOBSensorProperties = int -"""One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, CAP_PROP_OBSENSOR_INTRINSIC_CX, -CAP_PROP_OBSENSOR_INTRINSIC_CY]""" - -SOLVEPNP_ITERATIVE: int -SOLVEPNP_EPNP: int -SOLVEPNP_P3P: int -SOLVEPNP_DLS: int -SOLVEPNP_UPNP: int -SOLVEPNP_AP3P: int -SOLVEPNP_IPPE: int -SOLVEPNP_IPPE_SQUARE: int -SOLVEPNP_SQPNP: int -SOLVEPNP_MAX_COUNT: int -SolvePnPMethod = int -"""One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, -SOLVEPNP_IPPE_SQUARE, SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT]""" - -CALIB_HAND_EYE_TSAI: int -CALIB_HAND_EYE_PARK: int -CALIB_HAND_EYE_HORAUD: int -CALIB_HAND_EYE_ANDREFF: int -CALIB_HAND_EYE_DANIILIDIS: int -HandEyeCalibrationMethod = int -"""One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, CALIB_HAND_EYE_ANDREFF, -CALIB_HAND_EYE_DANIILIDIS]""" - -CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int -CALIB_ROBOT_WORLD_HAND_EYE_LI: int -RobotWorldHandEyeCalibrationMethod = int -"""One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI]""" - -SAMPLING_UNIFORM: int -SAMPLING_PROGRESSIVE_NAPSAC: int -SAMPLING_NAPSAC: int -SAMPLING_PROSAC: int -SamplingMethod = int -"""One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC]""" - -LOCAL_OPTIM_NULL: int -LOCAL_OPTIM_INNER_LO: int -LOCAL_OPTIM_INNER_AND_ITER_LO: int -LOCAL_OPTIM_GC: int -LOCAL_OPTIM_SIGMA: int -LocalOptimMethod = int -"""One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA]""" - -SCORE_METHOD_RANSAC: int -SCORE_METHOD_MSAC: int -SCORE_METHOD_MAGSAC: int -SCORE_METHOD_LMEDS: int -ScoreMethod = int -"""One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS]""" - -NEIGH_FLANN_KNN: int -NEIGH_GRID: int -NEIGH_FLANN_RADIUS: int -NeighborSearchMethod = int -"""One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS]""" - -NONE_POLISHER: int -LSQ_POLISHER: int -MAGSAC: int -COV_POLISHER: int -PolishingMethod = int -"""One of [NONE_POLISHER, LSQ_POLISHER, MAGSAC, COV_POLISHER]""" - -PROJ_SPHERICAL_ORTHO: int -PROJ_SPHERICAL_EQRECT: int -UndistortTypes = int -"""One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT]""" - -WINDOW_NORMAL: int -WINDOW_AUTOSIZE: int -WINDOW_OPENGL: int -WINDOW_FULLSCREEN: int -WINDOW_FREERATIO: int -WINDOW_KEEPRATIO: int -WINDOW_GUI_EXPANDED: int -WINDOW_GUI_NORMAL: int -WindowFlags = int -"""One of [WINDOW_NORMAL, WINDOW_AUTOSIZE, WINDOW_OPENGL, WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, -WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL]""" - -WND_PROP_FULLSCREEN: int -WND_PROP_AUTOSIZE: int -WND_PROP_ASPECT_RATIO: int -WND_PROP_OPENGL: int -WND_PROP_VISIBLE: int -WND_PROP_TOPMOST: int -WND_PROP_VSYNC: int -WindowPropertyFlags = int -"""One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, WND_PROP_OPENGL, WND_PROP_VISIBLE, 
-WND_PROP_TOPMOST, WND_PROP_VSYNC]""" - -EVENT_MOUSEMOVE: int -EVENT_LBUTTONDOWN: int -EVENT_RBUTTONDOWN: int -EVENT_MBUTTONDOWN: int -EVENT_LBUTTONUP: int -EVENT_RBUTTONUP: int -EVENT_MBUTTONUP: int -EVENT_LBUTTONDBLCLK: int -EVENT_RBUTTONDBLCLK: int -EVENT_MBUTTONDBLCLK: int -EVENT_MOUSEWHEEL: int -EVENT_MOUSEHWHEEL: int -MouseEventTypes = int -"""One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, EVENT_RBUTTONUP, - EVENT_MBUTTONUP, EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL]""" - -EVENT_FLAG_LBUTTON: int -EVENT_FLAG_RBUTTON: int -EVENT_FLAG_MBUTTON: int -EVENT_FLAG_CTRLKEY: int -EVENT_FLAG_SHIFTKEY: int -EVENT_FLAG_ALTKEY: int -MouseEventFlags = int -"""One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, -EVENT_FLAG_ALTKEY]""" - -QT_FONT_LIGHT: int -QT_FONT_NORMAL: int -QT_FONT_DEMIBOLD: int -QT_FONT_BOLD: int -QT_FONT_BLACK: int -QtFontWeights = int -"""One of [QT_FONT_LIGHT, QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK]""" - -QT_STYLE_NORMAL: int -QT_STYLE_ITALIC: int -QT_STYLE_OBLIQUE: int -QtFontStyles = int -"""One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE]""" - -QT_PUSH_BUTTON: int -QT_CHECKBOX: int -QT_RADIOBOX: int -QT_NEW_BUTTONBAR: int -QtButtonTypes = int -"""One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR]""" - -GShape_GMAT: int -GSHAPE_GMAT: int -GShape_GSCALAR: int -GSHAPE_GSCALAR: int -GShape_GARRAY: int -GSHAPE_GARRAY: int -GShape_GOPAQUE: int -GSHAPE_GOPAQUE: int -GShape_GFRAME: int -GSHAPE_GFRAME: int -GShape = int -"""One of [GShape_GMAT, GSHAPE_GMAT, GShape_GSCALAR, GSHAPE_GSCALAR, GShape_GARRAY, GSHAPE_GARRAY, GShape_GOPAQUE, -GSHAPE_GOPAQUE, GShape_GFRAME, GSHAPE_GFRAME]""" - -MediaFormat_BGR: int -MEDIA_FORMAT_BGR: int -MediaFormat_NV12: int -MEDIA_FORMAT_NV12: int -MediaFormat_GRAY: int -MEDIA_FORMAT_GRAY: int -MediaFormat = int -"""One of [MediaFormat_BGR, MEDIA_FORMAT_BGR, MediaFormat_NV12, MEDIA_FORMAT_NV12, MediaFormat_GRAY, -MEDIA_FORMAT_GRAY]""" - -FileStorage_READ: int -FILE_STORAGE_READ: int -FileStorage_WRITE: int -FILE_STORAGE_WRITE: int -FileStorage_APPEND: int -FILE_STORAGE_APPEND: int -FileStorage_MEMORY: int -FILE_STORAGE_MEMORY: int -FileStorage_FORMAT_MASK: int -FILE_STORAGE_FORMAT_MASK: int -FileStorage_FORMAT_AUTO: int -FILE_STORAGE_FORMAT_AUTO: int -FileStorage_FORMAT_XML: int -FILE_STORAGE_FORMAT_XML: int -FileStorage_FORMAT_YAML: int -FILE_STORAGE_FORMAT_YAML: int -FileStorage_FORMAT_JSON: int -FILE_STORAGE_FORMAT_JSON: int -FileStorage_BASE64: int -FILE_STORAGE_BASE64: int -FileStorage_WRITE_BASE64: int -FILE_STORAGE_WRITE_BASE64: int -FileStorage_Mode = int -"""One of [FileStorage_READ, FILE_STORAGE_READ, FileStorage_WRITE, FILE_STORAGE_WRITE, FileStorage_APPEND, -FILE_STORAGE_APPEND, FileStorage_MEMORY, FILE_STORAGE_MEMORY, FileStorage_FORMAT_MASK, FILE_STORAGE_FORMAT_MASK, -FileStorage_FORMAT_AUTO, FILE_STORAGE_FORMAT_AUTO, FileStorage_FORMAT_XML, FILE_STORAGE_FORMAT_XML, -FileStorage_FORMAT_YAML, FILE_STORAGE_FORMAT_YAML, FileStorage_FORMAT_JSON, FILE_STORAGE_FORMAT_JSON, -FileStorage_BASE64, FILE_STORAGE_BASE64, FileStorage_WRITE_BASE64, FILE_STORAGE_WRITE_BASE64]""" - -FileStorage_UNDEFINED: int -FILE_STORAGE_UNDEFINED: int -FileStorage_VALUE_EXPECTED: int -FILE_STORAGE_VALUE_EXPECTED: int -FileStorage_NAME_EXPECTED: int -FILE_STORAGE_NAME_EXPECTED: int -FileStorage_INSIDE_MAP: int -FILE_STORAGE_INSIDE_MAP: int -FileStorage_State 
= int -"""One of [FileStorage_UNDEFINED, FILE_STORAGE_UNDEFINED, FileStorage_VALUE_EXPECTED, FILE_STORAGE_VALUE_EXPECTED, -FileStorage_NAME_EXPECTED, FILE_STORAGE_NAME_EXPECTED, FileStorage_INSIDE_MAP, FILE_STORAGE_INSIDE_MAP]""" - -FileNode_NONE: int -FILE_NODE_NONE: int -FileNode_INT: int -FILE_NODE_INT: int -FileNode_REAL: int -FILE_NODE_REAL: int -FileNode_FLOAT: int -FILE_NODE_FLOAT: int -FileNode_STR: int -FILE_NODE_STR: int -FileNode_STRING: int -FILE_NODE_STRING: int -FileNode_SEQ: int -FILE_NODE_SEQ: int -FileNode_MAP: int -FILE_NODE_MAP: int -FileNode_TYPE_MASK: int -FILE_NODE_TYPE_MASK: int -FileNode_FLOW: int -FILE_NODE_FLOW: int -FileNode_UNIFORM: int -FILE_NODE_UNIFORM: int -FileNode_EMPTY: int -FILE_NODE_EMPTY: int -FileNode_NAMED: int -FILE_NODE_NAMED: int - -UMat_MAGIC_VAL: int -UMAT_MAGIC_VAL: int -UMat_AUTO_STEP: int -UMAT_AUTO_STEP: int -UMat_CONTINUOUS_FLAG: int -UMAT_CONTINUOUS_FLAG: int -UMat_SUBMATRIX_FLAG: int -UMAT_SUBMATRIX_FLAG: int -UMat_MAGIC_MASK: int -UMAT_MAGIC_MASK: int -UMat_TYPE_MASK: int -UMAT_TYPE_MASK: int -UMat_DEPTH_MASK: int -UMAT_DEPTH_MASK: int - -Subdiv2D_PTLOC_ERROR: int -SUBDIV2D_PTLOC_ERROR: int -Subdiv2D_PTLOC_OUTSIDE_RECT: int -SUBDIV2D_PTLOC_OUTSIDE_RECT: int -Subdiv2D_PTLOC_INSIDE: int -SUBDIV2D_PTLOC_INSIDE: int -Subdiv2D_PTLOC_VERTEX: int -SUBDIV2D_PTLOC_VERTEX: int -Subdiv2D_PTLOC_ON_EDGE: int -SUBDIV2D_PTLOC_ON_EDGE: int -Subdiv2D_NEXT_AROUND_ORG: int -SUBDIV2D_NEXT_AROUND_ORG: int -Subdiv2D_NEXT_AROUND_DST: int -SUBDIV2D_NEXT_AROUND_DST: int -Subdiv2D_PREV_AROUND_ORG: int -SUBDIV2D_PREV_AROUND_ORG: int -Subdiv2D_PREV_AROUND_DST: int -SUBDIV2D_PREV_AROUND_DST: int -Subdiv2D_NEXT_AROUND_LEFT: int -SUBDIV2D_NEXT_AROUND_LEFT: int -Subdiv2D_NEXT_AROUND_RIGHT: int -SUBDIV2D_NEXT_AROUND_RIGHT: int -Subdiv2D_PREV_AROUND_LEFT: int -SUBDIV2D_PREV_AROUND_LEFT: int -Subdiv2D_PREV_AROUND_RIGHT: int -SUBDIV2D_PREV_AROUND_RIGHT: int - -ORB_HARRIS_SCORE: int -ORB_FAST_SCORE: int -ORB_ScoreType = int -"""One of [ORB_HARRIS_SCORE, ORB_FAST_SCORE]""" - -FastFeatureDetector_TYPE_5_8: int -FAST_FEATURE_DETECTOR_TYPE_5_8: int -FastFeatureDetector_TYPE_7_12: int -FAST_FEATURE_DETECTOR_TYPE_7_12: int -FastFeatureDetector_TYPE_9_16: int -FAST_FEATURE_DETECTOR_TYPE_9_16: int -FastFeatureDetector_DetectorType = int -"""One of [FastFeatureDetector_TYPE_5_8, FAST_FEATURE_DETECTOR_TYPE_5_8, FastFeatureDetector_TYPE_7_12, -FAST_FEATURE_DETECTOR_TYPE_7_12, FastFeatureDetector_TYPE_9_16, FAST_FEATURE_DETECTOR_TYPE_9_16]""" - -FastFeatureDetector_THRESHOLD: int -FAST_FEATURE_DETECTOR_THRESHOLD: int -FastFeatureDetector_NONMAX_SUPPRESSION: int -FAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int -FastFeatureDetector_FAST_N: int -FAST_FEATURE_DETECTOR_FAST_N: int - -AgastFeatureDetector_AGAST_5_8: int -AGAST_FEATURE_DETECTOR_AGAST_5_8: int -AgastFeatureDetector_AGAST_7_12d: int -AGAST_FEATURE_DETECTOR_AGAST_7_12D: int -AgastFeatureDetector_AGAST_7_12s: int -AGAST_FEATURE_DETECTOR_AGAST_7_12S: int -AgastFeatureDetector_OAST_9_16: int -AGAST_FEATURE_DETECTOR_OAST_9_16: int -AgastFeatureDetector_DetectorType = int -"""One of [AgastFeatureDetector_AGAST_5_8, AGAST_FEATURE_DETECTOR_AGAST_5_8, AgastFeatureDetector_AGAST_7_12d, -AGAST_FEATURE_DETECTOR_AGAST_7_12D, AgastFeatureDetector_AGAST_7_12s, AGAST_FEATURE_DETECTOR_AGAST_7_12S, -AgastFeatureDetector_OAST_9_16, AGAST_FEATURE_DETECTOR_OAST_9_16]""" - -AgastFeatureDetector_THRESHOLD: int -AGAST_FEATURE_DETECTOR_THRESHOLD: int -AgastFeatureDetector_NONMAX_SUPPRESSION: int -AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int - 
-KAZE_DIFF_PM_G1: int -KAZE_DIFF_PM_G2: int -KAZE_DIFF_WEICKERT: int -KAZE_DIFF_CHARBONNIER: int -KAZE_DiffusivityType = int -"""One of [KAZE_DIFF_PM_G1, KAZE_DIFF_PM_G2, KAZE_DIFF_WEICKERT, KAZE_DIFF_CHARBONNIER]""" - -AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int -AKAZE_DESCRIPTOR_KAZE: int -AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int -AKAZE_DESCRIPTOR_MLDB: int -AKAZE_DescriptorType = int -"""One of [AKAZE_DESCRIPTOR_KAZE_UPRIGHT, AKAZE_DESCRIPTOR_KAZE, AKAZE_DESCRIPTOR_MLDB_UPRIGHT, -AKAZE_DESCRIPTOR_MLDB]""" - -DescriptorMatcher_FLANNBASED: int -DESCRIPTOR_MATCHER_FLANNBASED: int -DescriptorMatcher_BRUTEFORCE: int -DESCRIPTOR_MATCHER_BRUTEFORCE: int -DescriptorMatcher_BRUTEFORCE_L1: int -DESCRIPTOR_MATCHER_BRUTEFORCE_L1: int -DescriptorMatcher_BRUTEFORCE_HAMMING: int -DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING: int -DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int -DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT: int -DescriptorMatcher_BRUTEFORCE_SL2: int -DESCRIPTOR_MATCHER_BRUTEFORCE_SL2: int -DescriptorMatcher_MatcherType = int -"""One of [DescriptorMatcher_FLANNBASED, DESCRIPTOR_MATCHER_FLANNBASED, DescriptorMatcher_BRUTEFORCE, -DESCRIPTOR_MATCHER_BRUTEFORCE, DescriptorMatcher_BRUTEFORCE_L1, DESCRIPTOR_MATCHER_BRUTEFORCE_L1, -DescriptorMatcher_BRUTEFORCE_HAMMING, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING, DescriptorMatcher_BRUTEFORCE_HAMMINGLUT, -DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT, DescriptorMatcher_BRUTEFORCE_SL2, DESCRIPTOR_MATCHER_BRUTEFORCE_SL2]""" - -CirclesGridFinderParameters_SYMMETRIC_GRID: int -CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID: int -CirclesGridFinderParameters_ASYMMETRIC_GRID: int -CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID: int -CirclesGridFinderParameters_GridType = int -"""One of [CirclesGridFinderParameters_SYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID, -CirclesGridFinderParameters_ASYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID]""" - -StereoMatcher_DISP_SHIFT: int -STEREO_MATCHER_DISP_SHIFT: int -StereoMatcher_DISP_SCALE: int -STEREO_MATCHER_DISP_SCALE: int - -StereoBM_PREFILTER_NORMALIZED_RESPONSE: int -STEREO_BM_PREFILTER_NORMALIZED_RESPONSE: int -StereoBM_PREFILTER_XSOBEL: int -STEREO_BM_PREFILTER_XSOBEL: int - -StereoSGBM_MODE_SGBM: int -STEREO_SGBM_MODE_SGBM: int -StereoSGBM_MODE_HH: int -STEREO_SGBM_MODE_HH: int -StereoSGBM_MODE_SGBM_3WAY: int -STEREO_SGBM_MODE_SGBM_3WAY: int -StereoSGBM_MODE_HH4: int -STEREO_SGBM_MODE_HH4: int - -HOGDescriptor_L2Hys: int -HOGDESCRIPTOR_L2HYS: int -HOGDescriptor_HistogramNormType = int -"""One of [HOGDescriptor_L2Hys, HOGDESCRIPTOR_L2HYS]""" - -HOGDescriptor_DEFAULT_NLEVELS: int -HOGDESCRIPTOR_DEFAULT_NLEVELS: int - -HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int -HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL: int -HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int -HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW: int -HOGDescriptor_DescriptorStorageFormat = int -"""One of [HOGDescriptor_DESCR_FORMAT_COL_BY_COL, HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL, -HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW, HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW]""" - -QRCodeEncoder_MODE_AUTO: int -QRCODE_ENCODER_MODE_AUTO: int -QRCodeEncoder_MODE_NUMERIC: int -QRCODE_ENCODER_MODE_NUMERIC: int -QRCodeEncoder_MODE_ALPHANUMERIC: int -QRCODE_ENCODER_MODE_ALPHANUMERIC: int -QRCodeEncoder_MODE_BYTE: int -QRCODE_ENCODER_MODE_BYTE: int -QRCodeEncoder_MODE_ECI: int -QRCODE_ENCODER_MODE_ECI: int -QRCodeEncoder_MODE_KANJI: int -QRCODE_ENCODER_MODE_KANJI: int -QRCodeEncoder_MODE_STRUCTURED_APPEND: int -QRCODE_ENCODER_MODE_STRUCTURED_APPEND: int -QRCodeEncoder_EncodeMode = int -"""One of 
[QRCodeEncoder_MODE_AUTO, QRCODE_ENCODER_MODE_AUTO, QRCodeEncoder_MODE_NUMERIC, QRCODE_ENCODER_MODE_NUMERIC, -QRCodeEncoder_MODE_ALPHANUMERIC, QRCODE_ENCODER_MODE_ALPHANUMERIC, QRCodeEncoder_MODE_BYTE, QRCODE_ENCODER_MODE_BYTE, -QRCodeEncoder_MODE_ECI, QRCODE_ENCODER_MODE_ECI, QRCodeEncoder_MODE_KANJI, QRCODE_ENCODER_MODE_KANJI, - QRCodeEncoder_MODE_STRUCTURED_APPEND, QRCODE_ENCODER_MODE_STRUCTURED_APPEND]""" - -QRCodeEncoder_CORRECT_LEVEL_L: int -QRCODE_ENCODER_CORRECT_LEVEL_L: int -QRCodeEncoder_CORRECT_LEVEL_M: int -QRCODE_ENCODER_CORRECT_LEVEL_M: int -QRCodeEncoder_CORRECT_LEVEL_Q: int -QRCODE_ENCODER_CORRECT_LEVEL_Q: int -QRCodeEncoder_CORRECT_LEVEL_H: int -QRCODE_ENCODER_CORRECT_LEVEL_H: int -QRCodeEncoder_CorrectionLevel = int -"""One of [QRCodeEncoder_CORRECT_LEVEL_L, QRCODE_ENCODER_CORRECT_LEVEL_L, QRCodeEncoder_CORRECT_LEVEL_M, -QRCODE_ENCODER_CORRECT_LEVEL_M, QRCodeEncoder_CORRECT_LEVEL_Q, QRCODE_ENCODER_CORRECT_LEVEL_Q, -QRCodeEncoder_CORRECT_LEVEL_H, QRCODE_ENCODER_CORRECT_LEVEL_H]""" - -QRCodeEncoder_ECI_UTF8: int -QRCODE_ENCODER_ECI_UTF8: int -QRCodeEncoder_ECIEncodings = int -"""One of [QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]""" - -FaceRecognizerSF_FR_COSINE: int -FACE_RECOGNIZER_SF_FR_COSINE: int -FaceRecognizerSF_FR_NORM_L2: int -FACE_RECOGNIZER_SF_FR_NORM_L2: int -FaceRecognizerSF_DisType = int -"""One of [FaceRecognizerSF_FR_COSINE, FACE_RECOGNIZER_SF_FR_COSINE, FaceRecognizerSF_FR_NORM_L2, -FACE_RECOGNIZER_SF_FR_NORM_L2]""" - -Stitcher_OK: int -STITCHER_OK: int -Stitcher_ERR_NEED_MORE_IMGS: int -STITCHER_ERR_NEED_MORE_IMGS: int -Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int -STITCHER_ERR_HOMOGRAPHY_EST_FAIL: int -Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int -STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL: int -Stitcher_Status = int -"""One of [Stitcher_OK, STITCHER_OK, Stitcher_ERR_NEED_MORE_IMGS, STITCHER_ERR_NEED_MORE_IMGS, -Stitcher_ERR_HOMOGRAPHY_EST_FAIL, STITCHER_ERR_HOMOGRAPHY_EST_FAIL, Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL, -STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL]""" - -Stitcher_PANORAMA: int -STITCHER_PANORAMA: int -Stitcher_SCANS: int -STITCHER_SCANS: int -Stitcher_Mode = int -"""One of [Stitcher_PANORAMA, STITCHER_PANORAMA, Stitcher_SCANS, STITCHER_SCANS]""" - -DISOpticalFlow_PRESET_ULTRAFAST: int -DISOPTICAL_FLOW_PRESET_ULTRAFAST: int -DISOpticalFlow_PRESET_FAST: int -DISOPTICAL_FLOW_PRESET_FAST: int -DISOpticalFlow_PRESET_MEDIUM: int -DISOPTICAL_FLOW_PRESET_MEDIUM: int - -PCA_DATA_AS_ROW: int -PCA_DATA_AS_COL: int -PCA_USE_AVG: int -PCA_Flags = int -"""One of [PCA_DATA_AS_ROW, PCA_DATA_AS_COL, PCA_USE_AVG]""" - -SVD_MODIFY_A: int -SVD_NO_UV: int -SVD_FULL_UV: int -SVD_Flags = int -"""One of [SVD_MODIFY_A, SVD_NO_UV, SVD_FULL_UV]""" - -RNG_UNIFORM: int -RNG_NORMAL: int - -Formatter_FMT_DEFAULT: int -FORMATTER_FMT_DEFAULT: int -Formatter_FMT_MATLAB: int -FORMATTER_FMT_MATLAB: int -Formatter_FMT_CSV: int -FORMATTER_FMT_CSV: int -Formatter_FMT_PYTHON: int -FORMATTER_FMT_PYTHON: int -Formatter_FMT_NUMPY: int -FORMATTER_FMT_NUMPY: int -Formatter_FMT_C: int -FORMATTER_FMT_C: int -Formatter_FormatType = int -"""One of [Formatter_FMT_DEFAULT, FORMATTER_FMT_DEFAULT, Formatter_FMT_MATLAB, FORMATTER_FMT_MATLAB, Formatter_FMT_CSV, -FORMATTER_FMT_CSV, Formatter_FMT_PYTHON, FORMATTER_FMT_PYTHON, Formatter_FMT_NUMPY, FORMATTER_FMT_NUMPY, -Formatter_FMT_C, FORMATTER_FMT_C]""" - -_InputArray_KIND_SHIFT: int -_INPUT_ARRAY_KIND_SHIFT: int -_InputArray_FIXED_TYPE: int -_INPUT_ARRAY_FIXED_TYPE: int -_InputArray_FIXED_SIZE: int -_INPUT_ARRAY_FIXED_SIZE: int -_InputArray_KIND_MASK: int 
-_INPUT_ARRAY_KIND_MASK: int -_InputArray_NONE: int -_INPUT_ARRAY_NONE: int -_InputArray_MAT: int -_INPUT_ARRAY_MAT: int -_InputArray_MATX: int -_INPUT_ARRAY_MATX: int -_InputArray_STD_VECTOR: int -_INPUT_ARRAY_STD_VECTOR: int -_InputArray_STD_VECTOR_VECTOR: int -_INPUT_ARRAY_STD_VECTOR_VECTOR: int -_InputArray_STD_VECTOR_MAT: int -_INPUT_ARRAY_STD_VECTOR_MAT: int -_InputArray_EXPR: int -_INPUT_ARRAY_EXPR: int -_InputArray_OPENGL_BUFFER: int -_INPUT_ARRAY_OPENGL_BUFFER: int -_InputArray_CUDA_HOST_MEM: int -_INPUT_ARRAY_CUDA_HOST_MEM: int -_InputArray_CUDA_GPU_MAT: int -_INPUT_ARRAY_CUDA_GPU_MAT: int -_InputArray_UMAT: int -_INPUT_ARRAY_UMAT: int -_InputArray_STD_VECTOR_UMAT: int -_INPUT_ARRAY_STD_VECTOR_UMAT: int -_InputArray_STD_BOOL_VECTOR: int -_INPUT_ARRAY_STD_BOOL_VECTOR: int -_InputArray_STD_VECTOR_CUDA_GPU_MAT: int -_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT: int -_InputArray_STD_ARRAY: int -_INPUT_ARRAY_STD_ARRAY: int -_InputArray_STD_ARRAY_MAT: int -_INPUT_ARRAY_STD_ARRAY_MAT: int -_InputArray_KindFlag = int -"""One of [_InputArray_KIND_SHIFT, _INPUT_ARRAY_KIND_SHIFT, _InputArray_FIXED_TYPE, _INPUT_ARRAY_FIXED_TYPE, -_InputArray_FIXED_SIZE, _INPUT_ARRAY_FIXED_SIZE, _InputArray_KIND_MASK, _INPUT_ARRAY_KIND_MASK, _InputArray_NONE, -_INPUT_ARRAY_NONE, _InputArray_MAT, _INPUT_ARRAY_MAT, _InputArray_MATX, _INPUT_ARRAY_MATX, _InputArray_STD_VECTOR, -_INPUT_ARRAY_STD_VECTOR, _InputArray_STD_VECTOR_VECTOR, _INPUT_ARRAY_STD_VECTOR_VECTOR, _InputArray_STD_VECTOR_MAT, -_INPUT_ARRAY_STD_VECTOR_MAT, _InputArray_EXPR, _INPUT_ARRAY_EXPR, _InputArray_OPENGL_BUFFER, _INPUT_ARRAY_OPENGL_BUFFER, -_InputArray_CUDA_HOST_MEM, _INPUT_ARRAY_CUDA_HOST_MEM, _InputArray_CUDA_GPU_MAT, _INPUT_ARRAY_CUDA_GPU_MAT, -_InputArray_UMAT, _INPUT_ARRAY_UMAT, _InputArray_STD_VECTOR_UMAT, _INPUT_ARRAY_STD_VECTOR_UMAT, -_InputArray_STD_BOOL_VECTOR, _INPUT_ARRAY_STD_BOOL_VECTOR, _InputArray_STD_VECTOR_CUDA_GPU_MAT, -_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT, _InputArray_STD_ARRAY, _INPUT_ARRAY_STD_ARRAY, _InputArray_STD_ARRAY_MAT, -_INPUT_ARRAY_STD_ARRAY_MAT]""" - -_OutputArray_DEPTH_MASK_8U: int -_OUTPUT_ARRAY_DEPTH_MASK_8U: int -_OutputArray_DEPTH_MASK_8S: int -_OUTPUT_ARRAY_DEPTH_MASK_8S: int -_OutputArray_DEPTH_MASK_16U: int -_OUTPUT_ARRAY_DEPTH_MASK_16U: int -_OutputArray_DEPTH_MASK_16S: int -_OUTPUT_ARRAY_DEPTH_MASK_16S: int -_OutputArray_DEPTH_MASK_32S: int -_OUTPUT_ARRAY_DEPTH_MASK_32S: int -_OutputArray_DEPTH_MASK_32F: int -_OUTPUT_ARRAY_DEPTH_MASK_32F: int -_OutputArray_DEPTH_MASK_64F: int -_OUTPUT_ARRAY_DEPTH_MASK_64F: int -_OutputArray_DEPTH_MASK_16F: int -_OUTPUT_ARRAY_DEPTH_MASK_16F: int -_OutputArray_DEPTH_MASK_ALL: int -_OUTPUT_ARRAY_DEPTH_MASK_ALL: int -_OutputArray_DEPTH_MASK_ALL_BUT_8S: int -_OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S: int -_OutputArray_DEPTH_MASK_ALL_16F: int -_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F: int -_OutputArray_DEPTH_MASK_FLT: int -_OUTPUT_ARRAY_DEPTH_MASK_FLT: int -_OutputArray_DepthMask = int -"""One of [_OutputArray_DEPTH_MASK_8U, _OUTPUT_ARRAY_DEPTH_MASK_8U, _OutputArray_DEPTH_MASK_8S, -_OUTPUT_ARRAY_DEPTH_MASK_8S, _OutputArray_DEPTH_MASK_16U, _OUTPUT_ARRAY_DEPTH_MASK_16U, _OutputArray_DEPTH_MASK_16S, -_OUTPUT_ARRAY_DEPTH_MASK_16S, _OutputArray_DEPTH_MASK_32S, _OUTPUT_ARRAY_DEPTH_MASK_32S, _OutputArray_DEPTH_MASK_32F, -_OUTPUT_ARRAY_DEPTH_MASK_32F, _OutputArray_DEPTH_MASK_64F, _OUTPUT_ARRAY_DEPTH_MASK_64F, _OutputArray_DEPTH_MASK_16F, -_OUTPUT_ARRAY_DEPTH_MASK_16F, _OutputArray_DEPTH_MASK_ALL, _OUTPUT_ARRAY_DEPTH_MASK_ALL, -_OutputArray_DEPTH_MASK_ALL_BUT_8S, _OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S, 
_OutputArray_DEPTH_MASK_ALL_16F, -_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F, _OutputArray_DEPTH_MASK_FLT, _OUTPUT_ARRAY_DEPTH_MASK_FLT]""" - -UMatData_COPY_ON_MAP: int -UMAT_DATA_COPY_ON_MAP: int -UMatData_HOST_COPY_OBSOLETE: int -UMAT_DATA_HOST_COPY_OBSOLETE: int -UMatData_DEVICE_COPY_OBSOLETE: int -UMAT_DATA_DEVICE_COPY_OBSOLETE: int -UMatData_TEMP_UMAT: int -UMAT_DATA_TEMP_UMAT: int -UMatData_TEMP_COPIED_UMAT: int -UMAT_DATA_TEMP_COPIED_UMAT: int -UMatData_USER_ALLOCATED: int -UMAT_DATA_USER_ALLOCATED: int -UMatData_DEVICE_MEM_MAPPED: int -UMAT_DATA_DEVICE_MEM_MAPPED: int -UMatData_ASYNC_CLEANUP: int -UMAT_DATA_ASYNC_CLEANUP: int -UMatData_MemoryFlag = int -"""One of [UMatData_COPY_ON_MAP, UMAT_DATA_COPY_ON_MAP, UMatData_HOST_COPY_OBSOLETE, UMAT_DATA_HOST_COPY_OBSOLETE, -UMatData_DEVICE_COPY_OBSOLETE, UMAT_DATA_DEVICE_COPY_OBSOLETE, UMatData_TEMP_UMAT, UMAT_DATA_TEMP_UMAT, -UMatData_TEMP_COPIED_UMAT, UMAT_DATA_TEMP_COPIED_UMAT, UMatData_USER_ALLOCATED, UMAT_DATA_USER_ALLOCATED, -UMatData_DEVICE_MEM_MAPPED, UMAT_DATA_DEVICE_MEM_MAPPED, UMatData_ASYNC_CLEANUP, UMAT_DATA_ASYNC_CLEANUP]""" - -Mat_MAGIC_VAL: int -MAT_MAGIC_VAL: int -Mat_AUTO_STEP: int -MAT_AUTO_STEP: int -Mat_CONTINUOUS_FLAG: int -MAT_CONTINUOUS_FLAG: int -Mat_SUBMATRIX_FLAG: int -MAT_SUBMATRIX_FLAG: int -Mat_MAGIC_MASK: int -MAT_MAGIC_MASK: int -Mat_TYPE_MASK: int -MAT_TYPE_MASK: int -Mat_DEPTH_MASK: int -MAT_DEPTH_MASK: int - -SparseMat_MAGIC_VAL: int -SPARSE_MAT_MAGIC_VAL: int -SparseMat_MAX_DIM: int -SPARSE_MAT_MAX_DIM: int -SparseMat_HASH_SCALE: int -SPARSE_MAT_HASH_SCALE: int -SparseMat_HASH_BIT: int -SPARSE_MAT_HASH_BIT: int - -QuatEnum_INT_XYZ: int -QUAT_ENUM_INT_XYZ: int -QuatEnum_INT_XZY: int -QUAT_ENUM_INT_XZY: int -QuatEnum_INT_YXZ: int -QUAT_ENUM_INT_YXZ: int -QuatEnum_INT_YZX: int -QUAT_ENUM_INT_YZX: int -QuatEnum_INT_ZXY: int -QUAT_ENUM_INT_ZXY: int -QuatEnum_INT_ZYX: int -QUAT_ENUM_INT_ZYX: int -QuatEnum_INT_XYX: int -QUAT_ENUM_INT_XYX: int -QuatEnum_INT_XZX: int -QUAT_ENUM_INT_XZX: int -QuatEnum_INT_YXY: int -QUAT_ENUM_INT_YXY: int -QuatEnum_INT_YZY: int -QUAT_ENUM_INT_YZY: int -QuatEnum_INT_ZXZ: int -QUAT_ENUM_INT_ZXZ: int -QuatEnum_INT_ZYZ: int -QUAT_ENUM_INT_ZYZ: int -QuatEnum_EXT_XYZ: int -QUAT_ENUM_EXT_XYZ: int -QuatEnum_EXT_XZY: int -QUAT_ENUM_EXT_XZY: int -QuatEnum_EXT_YXZ: int -QUAT_ENUM_EXT_YXZ: int -QuatEnum_EXT_YZX: int -QUAT_ENUM_EXT_YZX: int -QuatEnum_EXT_ZXY: int -QUAT_ENUM_EXT_ZXY: int -QuatEnum_EXT_ZYX: int -QUAT_ENUM_EXT_ZYX: int -QuatEnum_EXT_XYX: int -QUAT_ENUM_EXT_XYX: int -QuatEnum_EXT_XZX: int -QUAT_ENUM_EXT_XZX: int -QuatEnum_EXT_YXY: int -QUAT_ENUM_EXT_YXY: int -QuatEnum_EXT_YZY: int -QUAT_ENUM_EXT_YZY: int -QuatEnum_EXT_ZXZ: int -QUAT_ENUM_EXT_ZXZ: int -QuatEnum_EXT_ZYZ: int -QUAT_ENUM_EXT_ZYZ: int -QuatEnum_EULER_ANGLES_MAX_VALUE: int -QUAT_ENUM_EULER_ANGLES_MAX_VALUE: int -QuatEnum_EulerAnglesType = int -"""One of [QuatEnum_INT_XYZ, QUAT_ENUM_INT_XYZ, QuatEnum_INT_XZY, QUAT_ENUM_INT_XZY, QuatEnum_INT_YXZ, -QUAT_ENUM_INT_YXZ, QuatEnum_INT_YZX, QUAT_ENUM_INT_YZX, QuatEnum_INT_ZXY, QUAT_ENUM_INT_ZXY, QuatEnum_INT_ZYX, -QUAT_ENUM_INT_ZYX, QuatEnum_INT_XYX, QUAT_ENUM_INT_XYX, QuatEnum_INT_XZX, QUAT_ENUM_INT_XZX, QuatEnum_INT_YXY, -QUAT_ENUM_INT_YXY, QuatEnum_INT_YZY, QUAT_ENUM_INT_YZY, QuatEnum_INT_ZXZ, QUAT_ENUM_INT_ZXZ, QuatEnum_INT_ZYZ, -QUAT_ENUM_INT_ZYZ, QuatEnum_EXT_XYZ, QUAT_ENUM_EXT_XYZ, QuatEnum_EXT_XZY, QUAT_ENUM_EXT_XZY, QuatEnum_EXT_YXZ, -QUAT_ENUM_EXT_YXZ, QuatEnum_EXT_YZX, QUAT_ENUM_EXT_YZX, QuatEnum_EXT_ZXY, QUAT_ENUM_EXT_ZXY, QuatEnum_EXT_ZYX, -QUAT_ENUM_EXT_ZYX, 
QuatEnum_EXT_XYX, QUAT_ENUM_EXT_XYX, QuatEnum_EXT_XZX, QUAT_ENUM_EXT_XZX, QuatEnum_EXT_YXY, -QUAT_ENUM_EXT_YXY, QuatEnum_EXT_YZY, QUAT_ENUM_EXT_YZY, QuatEnum_EXT_ZXZ, QUAT_ENUM_EXT_ZXZ, QuatEnum_EXT_ZYZ, -QUAT_ENUM_EXT_ZYZ, QuatEnum_EULER_ANGLES_MAX_VALUE, QUAT_ENUM_EULER_ANGLES_MAX_VALUE]""" - -TermCriteria_COUNT: int -TERM_CRITERIA_COUNT: int -TermCriteria_MAX_ITER: int -TERM_CRITERIA_MAX_ITER: int -TermCriteria_EPS: int -TERM_CRITERIA_EPS: int -TermCriteria_Type = int -"""One of [TermCriteria_COUNT, TERM_CRITERIA_COUNT, TermCriteria_MAX_ITER, TERM_CRITERIA_MAX_ITER, TermCriteria_EPS, -TERM_CRITERIA_EPS]""" - -GFluidKernel_Kind_Filter: int -GFLUID_KERNEL_KIND_FILTER: int -GFluidKernel_Kind_Resize: int -GFLUID_KERNEL_KIND_RESIZE: int -GFluidKernel_Kind_YUV420toRGB: int -GFLUID_KERNEL_KIND_YUV420TO_RGB: int -GFluidKernel_Kind = int -"""One of [GFluidKernel_Kind_Filter, GFLUID_KERNEL_KIND_FILTER, GFluidKernel_Kind_Resize, GFLUID_KERNEL_KIND_RESIZE, -GFluidKernel_Kind_YUV420toRGB, GFLUID_KERNEL_KIND_YUV420TO_RGB]""" - -MediaFrame_Access_R: int -MEDIA_FRAME_ACCESS_R: int -MediaFrame_Access_W: int -MEDIA_FRAME_ACCESS_W: int -MediaFrame_Access = int -"""One of [MediaFrame_Access_R, MEDIA_FRAME_ACCESS_R, MediaFrame_Access_W, MEDIA_FRAME_ACCESS_W]""" - -RMat_Access_R: int -RMAT_ACCESS_R: int -RMat_Access_W: int -RMAT_ACCESS_W: int -RMat_Access = int -"""One of [RMat_Access_R, RMAT_ACCESS_R, RMat_Access_W, RMAT_ACCESS_W]""" - -# Classes -class Algorithm: - # Functions - def clear(self) -> None: ... - @typing.overload - def write(self, fs: FileStorage) -> None: ... - @typing.overload - def write(self, fs: FileStorage, name: str) -> None: ... - def read(self, fn: FileNode) -> None: ... - def empty(self) -> bool: ... - def save(self, filename: str) -> None: ... - def getDefaultName(self) -> str: ... - -class AsyncArray: - # Functions - def __init__(self) -> None: ... - def release(self) -> None: ... - @typing.overload - def get(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - @typing.overload - def get(self, dst: UMat | None = ...) -> UMat: ... - @typing.overload - def get(self, timeoutNs: float, dst: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... - @typing.overload - def get(self, timeoutNs: float, dst: UMat | None = ...) -> tuple[bool, UMat]: ... - def wait_for(self, timeoutNs: float) -> bool: ... - def valid(self) -> bool: ... - -class FileStorage: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, filename: str, flags: int, encoding: str = ...) -> None: ... - def open(self, filename: str, flags: int, encoding: str = ...) -> bool: ... - def isOpened(self) -> bool: ... - def release(self) -> None: ... - def releaseAndGetString(self) -> str: ... - def getFirstTopLevelNode(self) -> FileNode: ... - def root(self, streamidx: int = ...) -> FileNode: ... - def getNode(self, nodename: str) -> FileNode: ... - @typing.overload - def write(self, name: str, val: int) -> None: ... - @typing.overload - def write(self, name: str, val: float) -> None: ... - @typing.overload - def write(self, name: str, val: str) -> None: ... - @typing.overload - def write(self, name: str, val: cv2.typing.MatLike) -> None: ... - @typing.overload - def write(self, name: str, val: typing.Sequence[str]) -> None: ... - def writeComment(self, comment: str, append: bool = ...) -> None: ... - def startWriteStruct(self, name: str, flags: int, typeName: str = ...) -> None: ... - def endWriteStruct(self) -> None: ... 
- def getFormat(self) -> int: ... - -class FileNode: - # Functions - def __init__(self) -> None: ... - def getNode(self, nodename: str) -> FileNode: ... - def at(self, i: int) -> FileNode: ... - def keys(self) -> typing.Sequence[str]: ... - def type(self) -> int: ... - def empty(self) -> bool: ... - def isNone(self) -> bool: ... - def isSeq(self) -> bool: ... - def isMap(self) -> bool: ... - def isInt(self) -> bool: ... - def isReal(self) -> bool: ... - def isString(self) -> bool: ... - def isNamed(self) -> bool: ... - def name(self) -> str: ... - def size(self) -> int: ... - def rawSize(self) -> int: ... - def real(self) -> float: ... - def string(self) -> str: ... - def mat(self) -> cv2.typing.MatLike: ... - -class RotatedRect: - center: cv2.typing.Point2f - size: cv2.typing.Size2f - angle: float - - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, center: cv2.typing.Point2f, size: cv2.typing.Size2f, angle: float) -> None: ... - @typing.overload - def __init__(self, point1: cv2.typing.Point2f, point2: cv2.typing.Point2f, point3: cv2.typing.Point2f) -> None: ... - def points(self) -> typing.Sequence[cv2.typing.Point2f]: ... - def boundingRect(self) -> cv2.typing.Rect: ... - -class KeyPoint: - pt: cv2.typing.Point2f - size: float - angle: float - response: float - octave: int - class_id: int - - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__( - self, - x: float, - y: float, - size: float, - angle: float = ..., - response: float = ..., - octave: int = ..., - class_id: int = ..., - ) -> None: ... - @staticmethod - @typing.overload - def convert( - keypoints: typing.Sequence[KeyPoint], - keypointIndexes: typing.Sequence[int] = ..., - ) -> typing.Sequence[cv2.typing.Point2f]: ... - @staticmethod - @typing.overload - def convert( - points2f: typing.Sequence[cv2.typing.Point2f], - size: float = ..., - response: float = ..., - octave: int = ..., - class_id: int = ..., - ) -> typing.Sequence[KeyPoint]: ... - @staticmethod - def overlap(kp1: KeyPoint, kp2: KeyPoint) -> float: ... - -class DMatch: - queryIdx: int - trainIdx: int - imgIdx: int - distance: float - - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, _queryIdx: int, _trainIdx: int, _distance: float) -> None: ... - @typing.overload - def __init__(self, _queryIdx: int, _trainIdx: int, _imgIdx: int, _distance: float) -> None: ... - -class TickMeter: - # Functions - def __init__(self) -> None: ... - def start(self) -> None: ... - def stop(self) -> None: ... - def getTimeTicks(self) -> int: ... - def getTimeMicro(self) -> float: ... - def getTimeMilli(self) -> float: ... - def getTimeSec(self) -> float: ... - def getCounter(self) -> int: ... - def getFPS(self) -> float: ... - def getAvgTimeSec(self) -> float: ... - def getAvgTimeMilli(self) -> float: ... - def reset(self) -> None: ... - -class UMat: - offset: int - - # Functions - @typing.overload - def __init__(self, usageFlags: UMatUsageFlags = ...) -> None: ... - @typing.overload - def __init__(self, rows: int, cols: int, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... - @typing.overload - def __init__(self, size: cv2.typing.Size, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... - @typing.overload - def __init__( - self, - rows: int, - cols: int, - type: int, - s: cv2.typing.Scalar, - usageFlags: UMatUsageFlags = ..., - ) -> None: ... 
- @typing.overload - def __init__( - self, - size: cv2.typing.Size, - type: int, - s: cv2.typing.Scalar, - usageFlags: UMatUsageFlags = ..., - ) -> None: ... - @typing.overload - def __init__(self, m: UMat) -> None: ... - @typing.overload - def __init__(self, m: UMat, rowRange: cv2.typing.Range, colRange: cv2.typing.Range = ...) -> None: ... - @typing.overload - def __init__(self, m: UMat, roi: cv2.typing.Rect) -> None: ... - @typing.overload - def __init__(self, m: UMat, ranges: typing.Sequence[cv2.typing.Range]) -> None: ... - @staticmethod - def queue() -> cv2.typing.IntPointer: ... - @staticmethod - def context() -> cv2.typing.IntPointer: ... - def get(self) -> cv2.typing.MatLike: ... - def isContinuous(self) -> bool: ... - def isSubmatrix(self) -> bool: ... - def handle(self, accessFlags: AccessFlag) -> cv2.typing.IntPointer: ... - -class Subdiv2D: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, rect: cv2.typing.Rect) -> None: ... - def initDelaunay(self, rect: cv2.typing.Rect) -> None: ... - @typing.overload - def insert(self, pt: cv2.typing.Point2f) -> int: ... - @typing.overload - def insert(self, ptvec: typing.Sequence[cv2.typing.Point2f]) -> None: ... - def locate(self, pt: cv2.typing.Point2f) -> tuple[int, int, int]: ... - def findNearest(self, pt: cv2.typing.Point2f) -> tuple[int, cv2.typing.Point2f]: ... - def getEdgeList(self) -> typing.Sequence[cv2.typing.Vec4f]: ... - def getLeadingEdgeList(self) -> typing.Sequence[int]: ... - def getTriangleList(self) -> typing.Sequence[cv2.typing.Vec6f]: ... - def getVoronoiFacetList( - self, - idx: typing.Sequence[int], - ) -> tuple[ - typing.Sequence[typing.Sequence[cv2.typing.Point2f]], - typing.Sequence[cv2.typing.Point2f], - ]: ... - def getVertex(self, vertex: int) -> tuple[cv2.typing.Point2f, int]: ... - def getEdge(self, edge: int, nextEdgeType: int) -> int: ... - def nextEdge(self, edge: int) -> int: ... - def rotateEdge(self, edge: int, rotate: int) -> int: ... - def symEdge(self, edge: int) -> int: ... - def edgeOrg(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... - def edgeDst(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... - -class Feature2D: - # Functions - @typing.overload - def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> typing.Sequence[KeyPoint]: ... - @typing.overload - def detect(self, image: UMat, mask: UMat | None = ...) -> typing.Sequence[KeyPoint]: ... - @typing.overload - def detect( - self, - images: typing.Sequence[cv2.typing.MatLike], - masks: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> typing.Sequence[typing.Sequence[KeyPoint]]: ... - @typing.overload - def detect( - self, - images: typing.Sequence[UMat], - masks: typing.Sequence[UMat] | None = ..., - ) -> typing.Sequence[typing.Sequence[KeyPoint]]: ... - @typing.overload - def compute( - self, - image: cv2.typing.MatLike, - keypoints: typing.Sequence[KeyPoint], - descriptors: cv2.typing.MatLike | None = ..., - ) -> tuple[ - typing.Sequence[KeyPoint], - cv2.typing.MatLike, - ]: ... - @typing.overload - def compute( - self, - image: UMat, - keypoints: typing.Sequence[KeyPoint], - descriptors: UMat | None = ..., - ) -> tuple[ - typing.Sequence[KeyPoint], - UMat, - ]: ... 
- @typing.overload - def compute( - self, - images: typing.Sequence[cv2.typing.MatLike], - keypoints: typing.Sequence[typing.Sequence[KeyPoint]], - descriptors: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> tuple[ - typing.Sequence[typing.Sequence[KeyPoint]], - typing.Sequence[cv2.typing.MatLike], - ]: ... - @typing.overload - def compute( - self, - images: typing.Sequence[UMat], - keypoints: typing.Sequence[typing.Sequence[KeyPoint]], - descriptors: typing.Sequence[UMat] | None = ..., - ) -> tuple[ - typing.Sequence[typing.Sequence[KeyPoint]], - typing.Sequence[UMat], - ]: ... - @typing.overload - def detectAndCompute( - self, - image: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - descriptors: cv2.typing.MatLike | None = ..., - useProvidedKeypoints: bool = ..., - ) -> tuple[ - typing.Sequence[KeyPoint], - cv2.typing.MatLike, - ]: ... - @typing.overload - def detectAndCompute( - self, - image: UMat, - mask: UMat, - descriptors: UMat | None = ..., - useProvidedKeypoints: bool = ..., - ) -> tuple[ - typing.Sequence[KeyPoint], - UMat, - ]: ... - def descriptorSize(self) -> int: ... - def descriptorType(self) -> int: ... - def defaultNorm(self) -> int: ... - @typing.overload - def write(self, fileName: str) -> None: ... - @typing.overload - def write(self, fs: FileStorage, name: str) -> None: ... - @typing.overload - def read(self, fileName: str) -> None: ... - @typing.overload - def read(self, arg1: FileNode) -> None: ... - def empty(self) -> bool: ... - def getDefaultName(self) -> str: ... - -class BOWTrainer: - # Functions - def add(self, descriptors: cv2.typing.MatLike) -> None: ... - def getDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ... - def descriptorsCount(self) -> int: ... - def clear(self) -> None: ... - @typing.overload - def cluster(self) -> cv2.typing.MatLike: ... - @typing.overload - def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... - -class BOWImgDescriptorExtractor: - # Functions - def __init__(self, dextractor: cv2.typing.DescriptorExtractor, dmatcher: DescriptorMatcher) -> None: ... - def setVocabulary(self, vocabulary: cv2.typing.MatLike) -> None: ... - def getVocabulary(self) -> cv2.typing.MatLike: ... - def compute( - self, - image: cv2.typing.MatLike, - keypoints: typing.Sequence[KeyPoint], - imgDescriptor: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - def descriptorSize(self) -> int: ... - def descriptorType(self) -> int: ... - -class VideoCapture: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, filename: str, apiPreference: int = ...) -> None: ... - @typing.overload - def __init__(self, filename: str, apiPreference: int, params: typing.Sequence[int]) -> None: ... - @typing.overload - def __init__(self, index: int, apiPreference: int = ...) -> None: ... - @typing.overload - def __init__(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> None: ... - @typing.overload - def open(self, filename: str, apiPreference: int = ...) -> bool: ... - @typing.overload - def open(self, filename: str, apiPreference: int, params: typing.Sequence[int]) -> bool: ... - @typing.overload - def open(self, index: int, apiPreference: int = ...) -> bool: ... - @typing.overload - def open(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> bool: ... - def isOpened(self) -> bool: ... - def release(self) -> None: ... - def grab(self) -> bool: ... 
- @typing.overload - def retrieve(self, image: cv2.typing.MatLike | None = ..., flag: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... - @typing.overload - def retrieve(self, image: UMat | None = ..., flag: int = ...) -> tuple[bool, UMat]: ... - @typing.overload - def read(self, image: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... - @typing.overload - def read(self, image: UMat | None = ...) -> tuple[bool, UMat]: ... - def set(self, propId: int, value: float) -> bool: ... - def get(self, propId: int) -> float: ... - def getBackendName(self) -> str: ... - def setExceptionMode(self, enable: bool) -> None: ... - def getExceptionMode(self) -> bool: ... - @staticmethod - def waitAny(streams: typing.Sequence[VideoCapture], timeoutNs: int = ...) -> tuple[bool, typing.Sequence[int]]: ... - -class VideoWriter: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__( - self, - filename: str, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - isColor: bool = ..., - ) -> None: ... - @typing.overload - def __init__( - self, - filename: str, - apiPreference: int, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - isColor: bool = ..., - ) -> None: ... - @typing.overload - def __init__( - self, - filename: str, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - params: typing.Sequence[int], - ) -> None: ... - @typing.overload - def __init__( - self, - filename: str, - apiPreference: int, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - params: typing.Sequence[int], - ) -> None: ... - @typing.overload - def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ... - @typing.overload - def open( - self, - filename: str, - apiPreference: int, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - isColor: bool = ..., - ) -> bool: ... - @typing.overload - def open( - self, - filename: str, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - params: typing.Sequence[int], - ) -> bool: ... - @typing.overload - def open( - self, - filename: str, - apiPreference: int, - fourcc: int, - fps: float, - frameSize: cv2.typing.Size, - params: typing.Sequence[int], - ) -> bool: ... - def isOpened(self) -> bool: ... - def release(self) -> None: ... - @typing.overload - def write(self, image: cv2.typing.MatLike) -> None: ... - @typing.overload - def write(self, image: UMat) -> None: ... - def set(self, propId: int, value: float) -> bool: ... - def get(self, propId: int) -> float: ... - @staticmethod - def fourcc(c1: str, c2: str, c3: str, c4: str) -> int: ... - def getBackendName(self) -> str: ... - -class UsacParams: - confidence: float - isParallel: bool - loIterations: int - loMethod: LocalOptimMethod - loSampleSize: int - maxIterations: int - neighborsSearch: NeighborSearchMethod - randomGeneratorState: int - sampler: SamplingMethod - score: ScoreMethod - threshold: float - final_polisher: PolishingMethod - final_polisher_iterations: int - - # Functions - def __init__(self) -> None: ... 
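As an aside on the VideoCapture/VideoWriter stubs being de-vendored above: they mirror the runtime API one-to-one. A minimal read-transcode loop (illustrative sketch only; the file names are hypothetical):

```py
import cv2

cap = cv2.VideoCapture("input.mp4")  # hypothetical input path
fourcc = cv2.VideoWriter.fourcc(*"mp4v")  # the static helper typed in the stub
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back if the backend reports 0
size = (
    int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
)
out = cv2.VideoWriter("output.mp4", fourcc, fps, size)

while cap.isOpened():
    ok, frame = cap.read()  # -> tuple[bool, MatLike], as typed above
    if not ok:
        break
    out.write(frame)

cap.release()
out.release()
```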
- -class CirclesGridFinderParameters: - densityNeighborhoodSize: cv2.typing.Size2f - minDensity: float - kmeansAttempts: int - minDistanceToAddKeypoint: int - keypointScale: int - minGraphConfidence: float - vertexGain: float - vertexPenalty: float - existingVertexGain: float - edgeGain: float - edgePenalty: float - convexHullFactor: float - minRNGEdgeSwitchDist: float - squareSize: float - maxRectifiedDistance: float - - # Functions - def __init__(self) -> None: ... - -class CascadeClassifier: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, filename: str) -> None: ... - def empty(self) -> bool: ... - def load(self, filename: str) -> bool: ... - def read(self, node: FileNode) -> bool: ... - @typing.overload - def detectMultiScale( - self, - image: cv2.typing.MatLike, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - ) -> typing.Sequence[cv2.typing.Rect]: ... - @typing.overload - def detectMultiScale( - self, - image: UMat, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - ) -> typing.Sequence[cv2.typing.Rect]: ... - @typing.overload - def detectMultiScale2( - self, - image: cv2.typing.MatLike, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[int], - ]: ... - @typing.overload - def detectMultiScale2( - self, - image: UMat, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[int], - ]: ... - @typing.overload - def detectMultiScale3( - self, - image: cv2.typing.MatLike, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - outputRejectLevels: bool = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[int], - typing.Sequence[float], - ]: ... - @typing.overload - def detectMultiScale3( - self, - image: UMat, - scaleFactor: float = ..., - minNeighbors: int = ..., - flags: int = ..., - minSize: cv2.typing.Size = ..., - maxSize: cv2.typing.Size = ..., - outputRejectLevels: bool = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[int], - typing.Sequence[float], - ]: ... - def isOldFormatCascade(self) -> bool: ... - def getOriginalWindowSize(self) -> cv2.typing.Size: ... - def getFeatureType(self) -> int: ... - @staticmethod - def convert(oldcascade: str, newcascade: str) -> bool: ... - -class HOGDescriptor: - @property - def winSize(self) -> cv2.typing.Size: ... - @property - def blockSize(self) -> cv2.typing.Size: ... - @property - def blockStride(self) -> cv2.typing.Size: ... - @property - def cellSize(self) -> cv2.typing.Size: ... - @property - def nbins(self) -> int: ... - @property - def derivAperture(self) -> int: ... - @property - def winSigma(self) -> float: ... - @property - def histogramNormType(self) -> HOGDescriptor_HistogramNormType: ... - @property - def L2HysThreshold(self) -> float: ... - @property - def gammaCorrection(self) -> bool: ... - @property - def svmDetector(self) -> typing.Sequence[float]: ... - @property - def nlevels(self) -> int: ... 
- @property - def signedGradient(self) -> bool: ... - - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__( - self, - _winSize: cv2.typing.Size, - _blockSize: cv2.typing.Size, - _blockStride: cv2.typing.Size, - _cellSize: cv2.typing.Size, - _nbins: int, - _derivAperture: int = ..., - _winSigma: float = ..., - _histogramNormType: HOGDescriptor_HistogramNormType = ..., - _L2HysThreshold: float = ..., - _gammaCorrection: bool = ..., - _nlevels: int = ..., - _signedGradient: bool = ..., - ) -> None: ... - @typing.overload - def __init__(self, filename: str) -> None: ... - def getDescriptorSize(self) -> int: ... - def checkDetectorSize(self) -> bool: ... - def getWinSigma(self) -> float: ... - @typing.overload - def setSVMDetector(self, svmdetector: cv2.typing.MatLike) -> None: ... - @typing.overload - def setSVMDetector(self, svmdetector: UMat) -> None: ... - def load(self, filename: str, objname: str = ...) -> bool: ... - def save(self, filename: str, objname: str = ...) -> None: ... - @typing.overload - def compute( - self, - img: cv2.typing.MatLike, - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - locations: typing.Sequence[cv2.typing.Point] = ..., - ) -> typing.Sequence[float]: ... - @typing.overload - def compute( - self, - img: UMat, - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - locations: typing.Sequence[cv2.typing.Point] = ..., - ) -> typing.Sequence[float]: ... - @typing.overload - def detect( - self, - img: cv2.typing.MatLike, - hitThreshold: float = ..., - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - searchLocations: typing.Sequence[cv2.typing.Point] = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Point], - typing.Sequence[float], - ]: ... - @typing.overload - def detect( - self, - img: UMat, - hitThreshold: float = ..., - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - searchLocations: typing.Sequence[cv2.typing.Point] = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Point], - typing.Sequence[float], - ]: ... - @typing.overload - def detectMultiScale( - self, - img: cv2.typing.MatLike, - hitThreshold: float = ..., - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - scale: float = ..., - groupThreshold: float = ..., - useMeanshiftGrouping: bool = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[float], - ]: ... - @typing.overload - def detectMultiScale( - self, - img: UMat, - hitThreshold: float = ..., - winStride: cv2.typing.Size = ..., - padding: cv2.typing.Size = ..., - scale: float = ..., - groupThreshold: float = ..., - useMeanshiftGrouping: bool = ..., - ) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[float], - ]: ... - @typing.overload - def computeGradient( - self, - img: cv2.typing.MatLike, - grad: cv2.typing.MatLike, - angleOfs: cv2.typing.MatLike, - paddingTL: cv2.typing.Size = ..., - paddingBR: cv2.typing.Size = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def computeGradient( - self, - img: UMat, - grad: UMat, - angleOfs: UMat, - paddingTL: cv2.typing.Size = ..., - paddingBR: cv2.typing.Size = ..., - ) -> tuple[ - UMat, - UMat, - ]: ... - @staticmethod - def getDefaultPeopleDetector() -> typing.Sequence[float]: ... - @staticmethod - def getDaimlerPeopleDetector() -> typing.Sequence[float]: ... 
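The HOGDescriptor stub removed above ends with the bundled people-detector helpers; a minimal pedestrian-detection sketch using them (illustrative, image path hypothetical):

```py
import cv2

hog = cv2.HOGDescriptor()
# getDefaultPeopleDetector() is the staticmethod declared at the end of the stub.
hog.setSVMDetector(cv2.HOGDescriptor.getDefaultPeopleDetector())

img = cv2.imread("people.jpg")  # hypothetical image
rects, _weights = hog.detectMultiScale(img, winStride=(8, 8))
for x, y, w, h in rects:  # rects are (x, y, w, h) as typed above
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
```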
- -class QRCodeEncoder: - # Classes - class Params: - version: int - correction_level: QRCodeEncoder_CorrectionLevel - mode: QRCodeEncoder_EncodeMode - structure_number: int - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: QRCodeEncoder.Params = ...) -> QRCodeEncoder: ... - @typing.overload - def encode(self, encoded_info: str, qrcode: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - @typing.overload - def encode(self, encoded_info: str, qrcode: UMat | None = ...) -> UMat: ... - @typing.overload - def encodeStructuredAppend( - self, - encoded_info: str, - qrcodes: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> typing.Sequence[cv2.typing.MatLike]: ... - @typing.overload - def encodeStructuredAppend( - self, - encoded_info: str, - qrcodes: typing.Sequence[UMat] | None = ..., - ) -> typing.Sequence[UMat]: ... - -class GraphicalCodeDetector: - # Functions - @typing.overload - def detect( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - ) -> tuple[ - bool, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detect(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... - @typing.overload - def decode( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike, - straight_code: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - ]: ... - @typing.overload - def decode(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[str, UMat]: ... - @typing.overload - def detectAndDecode( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - straight_code: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detectAndDecode( - self, - img: UMat, - points: UMat | None = ..., - straight_code: UMat | None = ..., - ) -> tuple[ - str, - UMat, - UMat, - ]: ... - @typing.overload - def detectMulti( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - ) -> tuple[ - bool, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detectMulti(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... - @typing.overload - def decodeMulti( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike, - straight_code: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> tuple[ - bool, - typing.Sequence[str], - typing.Sequence[cv2.typing.MatLike], - ]: ... - @typing.overload - def decodeMulti( - self, - img: UMat, - points: UMat, - straight_code: typing.Sequence[UMat] | None = ..., - ) -> tuple[ - bool, - typing.Sequence[str], - typing.Sequence[UMat], - ]: ... - @typing.overload - def detectAndDecodeMulti( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - straight_code: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> tuple[ - bool, - typing.Sequence[str], - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - ]: ... - @typing.overload - def detectAndDecodeMulti( - self, - img: UMat, - points: UMat | None = ..., - straight_code: typing.Sequence[UMat] | None = ..., - ) -> tuple[ - bool, - typing.Sequence[str], - UMat, - typing.Sequence[UMat], - ]: ... - -class FaceDetectorYN: - # Functions - def setInputSize(self, input_size: cv2.typing.Size) -> None: ... - def getInputSize(self) -> cv2.typing.Size: ... - def setScoreThreshold(self, score_threshold: float) -> None: ... - def getScoreThreshold(self) -> float: ... 
- def setNMSThreshold(self, nms_threshold: float) -> None: ... - def getNMSThreshold(self) -> float: ... - def setTopK(self, top_k: int) -> None: ... - def getTopK(self) -> int: ... - @typing.overload - def detect( - self, - image: cv2.typing.MatLike, - faces: cv2.typing.MatLike | None = ..., - ) -> tuple[ - int, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detect(self, image: UMat, faces: UMat | None = ...) -> tuple[int, UMat]: ... - @classmethod - def create( - cls, - model: str, - config: str, - input_size: cv2.typing.Size, - score_threshold: float = ..., - nms_threshold: float = ..., - top_k: int = ..., - backend_id: int = ..., - target_id: int = ..., - ) -> FaceDetectorYN: ... - -class FaceRecognizerSF: - # Functions - @typing.overload - def alignCrop( - self, - src_img: cv2.typing.MatLike, - face_box: cv2.typing.MatLike, - aligned_img: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def alignCrop(self, src_img: UMat, face_box: UMat, aligned_img: UMat | None = ...) -> UMat: ... - @typing.overload - def feature( - self, - aligned_img: cv2.typing.MatLike, - face_feature: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def feature(self, aligned_img: UMat, face_feature: UMat | None = ...) -> UMat: ... - @typing.overload - def match( - self, - face_feature1: cv2.typing.MatLike, - face_feature2: cv2.typing.MatLike, - dis_type: int = ..., - ) -> float: ... - @typing.overload - def match(self, face_feature1: UMat, face_feature2: UMat, dis_type: int = ...) -> float: ... - @classmethod - def create(cls, model: str, config: str, backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ... - -class Stitcher: - # Functions - @classmethod - def create(cls, mode: Stitcher_Mode = ...) -> Stitcher: ... - def registrationResol(self) -> float: ... - def setRegistrationResol(self, resol_mpx: float) -> None: ... - def seamEstimationResol(self) -> float: ... - def setSeamEstimationResol(self, resol_mpx: float) -> None: ... - def compositingResol(self) -> float: ... - def setCompositingResol(self, resol_mpx: float) -> None: ... - def panoConfidenceThresh(self) -> float: ... - def setPanoConfidenceThresh(self, conf_thresh: float) -> None: ... - def waveCorrection(self) -> bool: ... - def setWaveCorrection(self, flag: bool) -> None: ... - def interpolationFlags(self) -> InterpolationFlags: ... - def setInterpolationFlags(self, interp_flags: InterpolationFlags) -> None: ... - @typing.overload - def estimateTransform( - self, - images: typing.Sequence[cv2.typing.MatLike], - masks: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> Stitcher_Status: ... - @typing.overload - def estimateTransform( - self, - images: typing.Sequence[UMat], - masks: typing.Sequence[UMat] | None = ..., - ) -> Stitcher_Status: ... - @typing.overload - def composePanorama(self, pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... - @typing.overload - def composePanorama(self, pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... - @typing.overload - def composePanorama( - self, - images: typing.Sequence[cv2.typing.MatLike], - pano: cv2.typing.MatLike | None = ..., - ) -> tuple[ - Stitcher_Status, - cv2.typing.MatLike, - ]: ... - @typing.overload - def composePanorama( - self, - images: typing.Sequence[UMat], - pano: UMat | None = ..., - ) -> tuple[ - Stitcher_Status, - UMat, - ]: ... 
- @typing.overload - def stitch( - self, - images: typing.Sequence[cv2.typing.MatLike], - pano: cv2.typing.MatLike | None = ..., - ) -> tuple[ - Stitcher_Status, - cv2.typing.MatLike, - ]: ... - @typing.overload - def stitch(self, images: typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... - @typing.overload - def stitch( - self, - images: typing.Sequence[cv2.typing.MatLike], - masks: typing.Sequence[cv2.typing.MatLike], - pano: cv2.typing.MatLike | None = ..., - ) -> tuple[ - Stitcher_Status, - cv2.typing.MatLike, - ]: ... - @typing.overload - def stitch( - self, - images: typing.Sequence[UMat], - masks: typing.Sequence[UMat], - pano: UMat | None = ..., - ) -> tuple[ - Stitcher_Status, - UMat, - ]: ... - def workScale(self) -> float: ... - -class PyRotationWarper: - # Functions - @typing.overload - def __init__(self, type: str, scale: float) -> None: ... - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def warpPoint(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ... - @typing.overload - def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... - @typing.overload - def warpPointBackward( - self, - pt: cv2.typing.Point2f, - K: cv2.typing.MatLike, - R: cv2.typing.MatLike, - ) -> cv2.typing.Point2f: ... - @typing.overload - def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... - @typing.overload - def warpPointBackward( - self, - pt: cv2.typing.Point2f, - K: cv2.typing.MatLike, - R: cv2.typing.MatLike, - ) -> cv2.typing.Point2f: ... - @typing.overload - def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... - @typing.overload - def buildMaps( - self, - src_size: cv2.typing.Size, - K: cv2.typing.MatLike, - R: cv2.typing.MatLike, - xmap: cv2.typing.MatLike | None = ..., - ymap: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.Rect, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def buildMaps( - self, - src_size: cv2.typing.Size, - K: UMat, - R: UMat, - xmap: UMat | None = ..., - ymap: UMat | None = ..., - ) -> tuple[ - cv2.typing.Rect, - UMat, - UMat, - ]: ... - @typing.overload - def warp( - self, - src: cv2.typing.MatLike, - K: cv2.typing.MatLike, - R: cv2.typing.MatLike, - interp_mode: int, - border_mode: int, - dst: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.Point, - cv2.typing.MatLike, - ]: ... - @typing.overload - def warp( - self, - src: UMat, - K: UMat, - R: UMat, - interp_mode: int, - border_mode: int, - dst: UMat | None = ..., - ) -> tuple[ - cv2.typing.Point, - UMat, - ]: ... - @typing.overload - def warpBackward( - self, - src: cv2.typing.MatLike, - K: cv2.typing.MatLike, - R: cv2.typing.MatLike, - interp_mode: int, - border_mode: int, - dst_size: cv2.typing.Size, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def warpBackward( - self, - src: UMat, - K: UMat, - R: UMat, - interp_mode: int, - border_mode: int, - dst_size: cv2.typing.Size, - dst: UMat | None = ..., - ) -> UMat: ... - @typing.overload - def warpRoi(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Rect: ... - @typing.overload - def warpRoi(self, src_size: cv2.typing.Size, K: UMat, R: UMat) -> cv2.typing.Rect: ... - def getScale(self) -> float: ... - def setScale(self, arg1: float) -> None: ... - -class WarperCreator: ... 
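The Stitcher stub deleted above returns the Stitcher_Status codes declared earlier in this same file (Stitcher_OK and friends). A minimal panorama sketch (illustrative only; paths hypothetical):

```py
import cv2

images = [cv2.imread(p) for p in ("left.jpg", "right.jpg")]  # hypothetical paths
stitcher = cv2.Stitcher.create(cv2.Stitcher_PANORAMA)
status, pano = stitcher.stitch(images)  # -> tuple[Stitcher_Status, MatLike]
if status == cv2.Stitcher_OK:
    cv2.imwrite("pano.jpg", pano)
```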
- -class KalmanFilter: - statePre: cv2.typing.MatLike - statePost: cv2.typing.MatLike - transitionMatrix: cv2.typing.MatLike - controlMatrix: cv2.typing.MatLike - measurementMatrix: cv2.typing.MatLike - processNoiseCov: cv2.typing.MatLike - measurementNoiseCov: cv2.typing.MatLike - errorCovPre: cv2.typing.MatLike - gain: cv2.typing.MatLike - errorCovPost: cv2.typing.MatLike - - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, dynamParams: int, measureParams: int, controlParams: int = ..., type: int = ...) -> None: ... - def predict(self, control: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - def correct(self, measurement: cv2.typing.MatLike) -> cv2.typing.MatLike: ... - -class Tracker: - # Functions - @typing.overload - def init(self, image: cv2.typing.MatLike, boundingBox: cv2.typing.Rect) -> None: ... - @typing.overload - def init(self, image: UMat, boundingBox: cv2.typing.Rect) -> None: ... - @typing.overload - def update(self, image: cv2.typing.MatLike) -> tuple[bool, cv2.typing.Rect]: ... - @typing.overload - def update(self, image: UMat) -> tuple[bool, cv2.typing.Rect]: ... - -class GArrayDesc: ... - -class GComputation: - # Functions - @typing.overload - def __init__(self, ins: cv2.typing.GProtoInputArgs, outs: cv2.typing.GProtoOutputArgs) -> None: ... - @typing.overload - def __init__(self, in_: GMat, out: GMat) -> None: ... - @typing.overload - def __init__(self, in_: GMat, out: GScalar) -> None: ... - @typing.overload - def __init__(self, in1: GMat, in2: GMat, out: GMat) -> None: ... - def apply( - self, - callback: cv2.typing.ExtractArgsCallback, - args: typing.Sequence[GCompileArg] = ..., - ) -> typing.Sequence[cv2.typing.GRunArg]: ... - @typing.overload - def compileStreaming( - self, - in_metas: typing.Sequence[cv2.typing.GMetaArg], - args: typing.Sequence[GCompileArg] = ..., - ) -> GStreamingCompiled: ... - @typing.overload - def compileStreaming(self, args: typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ... - @typing.overload - def compileStreaming( - self, - callback: cv2.typing.ExtractMetaCallback, - args: typing.Sequence[GCompileArg] = ..., - ) -> GStreamingCompiled: ... - -class GFrame: - # Functions - def __init__(self) -> None: ... - -class GKernelPackage: - # Functions - def size(self) -> int: ... - -class GMat: - # Functions - def __init__(self) -> None: ... - -class GMatDesc: - @property - def depth(self) -> int: ... - @property - def chan(self) -> int: ... - @property - def size(self) -> cv2.typing.Size: ... - @property - def planar(self) -> bool: ... - @property - def dims(self) -> typing.Sequence[int]: ... - - # Functions - @typing.overload - def __init__(self, d: int, c: int, s: cv2.typing.Size, p: bool = ...) -> None: ... - @typing.overload - def __init__(self, d: int, dd: typing.Sequence[int]) -> None: ... - @typing.overload - def __init__(self, d: int, dd: typing.Sequence[int]) -> None: ... - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def withSizeDelta(self, delta: cv2.typing.Size) -> GMatDesc: ... - @typing.overload - def withSizeDelta(self, dx: int, dy: int) -> GMatDesc: ... - def withSize(self, sz: cv2.typing.Size) -> GMatDesc: ... - def withDepth(self, ddepth: int) -> GMatDesc: ... - def withType(self, ddepth: int, dchan: int) -> GMatDesc: ... - @typing.overload - def asPlanar(self) -> GMatDesc: ... - @typing.overload - def asPlanar(self, planes: int) -> GMatDesc: ... - def asInterleaved(self) -> GMatDesc: ... - -class GOpaqueDesc: ... 
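The KalmanFilter stub above exposes the filter matrices as writable attributes; the classic constant-velocity tracking setup looks like this (illustrative sketch, values arbitrary):

```py
import cv2
import numpy as np

kf = cv2.KalmanFilter(4, 2)  # state (x, y, vx, vy); measurements (x, y)
kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                 [0, 1, 0, 0]], np.float32)  # state -> measurement
kf.transitionMatrix = np.array([[1, 0, 1, 0],
                                [0, 1, 0, 1],
                                [0, 0, 1, 0],
                                [0, 0, 0, 1]], np.float32)  # constant velocity
kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-3

prediction = kf.predict()                                   # a-priori estimate
corrected = kf.correct(np.array([[5.0], [3.0]], np.float32))  # fold in a measurement
```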
- -class GScalar: - # Functions - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, s: cv2.typing.Scalar) -> None: ... - -class GScalarDesc: ... - -class GStreamingCompiled: - # Functions - def __init__(self) -> None: ... - def setSource(self, callback: cv2.typing.ExtractArgsCallback) -> None: ... - def start(self) -> None: ... - def pull(self) -> tuple[bool, typing.Sequence[cv2.typing.GRunArg] | typing.Sequence[cv2.typing.GOptRunArg]]: ... - def stop(self) -> None: ... - def running(self) -> bool: ... - -class GOpaqueT: - # Functions - def __init__(self, type: cv2.gapi.ArgType) -> None: ... - def type(self) -> cv2.gapi.ArgType: ... - -class GArrayT: - # Functions - def __init__(self, type: cv2.gapi.ArgType) -> None: ... - def type(self) -> cv2.gapi.ArgType: ... - -class GCompileArg: - # Functions - @typing.overload - def __init__(self, arg: GKernelPackage) -> None: ... - @typing.overload - def __init__(self, arg: cv2.gapi.GNetPackage) -> None: ... - @typing.overload - def __init__(self, arg: cv2.gapi.streaming.queue_capacity) -> None: ... - -class GInferInputs: - # Functions - def __init__(self) -> None: ... - @typing.overload - def setInput(self, name: str, value: GMat) -> GInferInputs: ... - @typing.overload - def setInput(self, name: str, value: GFrame) -> GInferInputs: ... - -class GInferListInputs: - # Functions - def __init__(self) -> None: ... - @typing.overload - def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... - @typing.overload - def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... - -class GInferOutputs: - # Functions - def __init__(self) -> None: ... - def at(self, name: str) -> GMat: ... - -class GInferListOutputs: - # Functions - def __init__(self) -> None: ... - def at(self, name: str) -> GArrayT: ... - -class GeneralizedHough(Algorithm): - # Functions - @typing.overload - def setTemplate(self, templ: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ... - @typing.overload - def setTemplate(self, templ: UMat, templCenter: cv2.typing.Point = ...) -> None: ... - @typing.overload - def setTemplate( - self, - edges: cv2.typing.MatLike, - dx: cv2.typing.MatLike, - dy: cv2.typing.MatLike, - templCenter: cv2.typing.Point = ..., - ) -> None: ... - @typing.overload - def setTemplate(self, edges: UMat, dx: UMat, dy: UMat, templCenter: cv2.typing.Point = ...) -> None: ... - @typing.overload - def detect( - self, - image: cv2.typing.MatLike, - positions: cv2.typing.MatLike | None = ..., - votes: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detect(self, image: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ... - @typing.overload - def detect( - self, - edges: cv2.typing.MatLike, - dx: cv2.typing.MatLike, - dy: cv2.typing.MatLike, - positions: cv2.typing.MatLike | None = ..., - votes: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detect( - self, - edges: UMat, - dx: UMat, - dy: UMat, - positions: UMat | None = ..., - votes: UMat | None = ..., - ) -> tuple[ - UMat, - UMat, - ]: ... - def setCannyLowThresh(self, cannyLowThresh: int) -> None: ... - def getCannyLowThresh(self) -> int: ... - def setCannyHighThresh(self, cannyHighThresh: int) -> None: ... - def getCannyHighThresh(self) -> int: ... - def setMinDist(self, minDist: float) -> None: ... - def getMinDist(self) -> float: ... 
- def setDp(self, dp: float) -> None: ... - def getDp(self) -> float: ... - def setMaxBufferSize(self, maxBufferSize: int) -> None: ... - def getMaxBufferSize(self) -> int: ... - -class CLAHE(Algorithm): - # Functions - @typing.overload - def apply(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - @typing.overload - def apply(self, src: UMat, dst: UMat | None = ...) -> UMat: ... - def setClipLimit(self, clipLimit: float) -> None: ... - def getClipLimit(self) -> float: ... - def setTilesGridSize(self, tileGridSize: cv2.typing.Size) -> None: ... - def getTilesGridSize(self) -> cv2.typing.Size: ... - def collectGarbage(self) -> None: ... - -class LineSegmentDetector(Algorithm): - # Functions - @typing.overload - def detect( - self, - image: cv2.typing.MatLike, - lines: cv2.typing.MatLike | None = ..., - width: cv2.typing.MatLike | None = ..., - prec: cv2.typing.MatLike | None = ..., - nfa: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detect( - self, - image: UMat, - lines: UMat | None = ..., - width: UMat | None = ..., - prec: UMat | None = ..., - nfa: UMat | None = ..., - ) -> tuple[ - UMat, - UMat, - UMat, - UMat, - ]: ... - @typing.overload - def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike) -> cv2.typing.MatLike: ... - @typing.overload - def drawSegments(self, image: UMat, lines: UMat) -> UMat: ... - @typing.overload - def compareSegments( - self, - size: cv2.typing.Size, - lines1: cv2.typing.MatLike, - lines2: cv2.typing.MatLike, - image: cv2.typing.MatLike | None = ..., - ) -> tuple[ - int, - cv2.typing.MatLike, - ]: ... - @typing.overload - def compareSegments( - self, - size: cv2.typing.Size, - lines1: UMat, - lines2: UMat, - image: UMat | None = ..., - ) -> tuple[ - int, - UMat, - ]: ... - -class Tonemap(Algorithm): - # Functions - @typing.overload - def process(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: UMat, dst: UMat | None = ...) -> UMat: ... - def getGamma(self) -> float: ... - def setGamma(self, gamma: float) -> None: ... - -class AlignExposures(Algorithm): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - dst: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - ) -> None: ... - @typing.overload - def process( - self, - src: typing.Sequence[UMat], - dst: typing.Sequence[cv2.typing.MatLike], - times: UMat, - response: UMat, - ) -> None: ... - -class CalibrateCRF(Algorithm): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... - -class MergeExposures(Algorithm): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... 
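The CLAHE class stubbed above is the handle you get back from the cv2.createCLAHE factory (declared elsewhere in these stubs); typical adaptive contrast equalization (illustrative, image path hypothetical):

```py
import cv2

gray = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)  # hypothetical image
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(gray)  # apply() as typed in the CLAHE stub above
```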
- -class AffineFeature(Feature2D): - # Functions - @classmethod - def create( - cls, - backend: Feature2D, - maxTilt: int = ..., - minTilt: int = ..., - tiltStep: float = ..., - rotateStepBase: float = ..., - ) -> AffineFeature: ... - def setViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ... - def getViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ... - def getDefaultName(self) -> str: ... - -class SIFT(Feature2D): - # Functions - @classmethod - @typing.overload - def create( - cls, - nfeatures: int = ..., - nOctaveLayers: int = ..., - contrastThreshold: float = ..., - edgeThreshold: float = ..., - sigma: float = ..., - enable_precise_upscale: bool = ..., - ) -> SIFT: ... - @classmethod - @typing.overload - def create( - cls, - nfeatures: int, - nOctaveLayers: int, - contrastThreshold: float, - edgeThreshold: float, - sigma: float, - descriptorType: int, - enable_precise_upscale: bool = ..., - ) -> SIFT: ... - def getDefaultName(self) -> str: ... - def setNFeatures(self, maxFeatures: int) -> None: ... - def getNFeatures(self) -> int: ... - def setNOctaveLayers(self, nOctaveLayers: int) -> None: ... - def getNOctaveLayers(self) -> int: ... - def setContrastThreshold(self, contrastThreshold: float) -> None: ... - def getContrastThreshold(self) -> float: ... - def setEdgeThreshold(self, edgeThreshold: float) -> None: ... - def getEdgeThreshold(self) -> float: ... - def setSigma(self, sigma: float) -> None: ... - def getSigma(self) -> float: ... - -class BRISK(Feature2D): - # Functions - @classmethod - @typing.overload - def create(cls, thresh: int = ..., octaves: int = ..., patternScale: float = ...) -> BRISK: ... - @classmethod - @typing.overload - def create( - cls, - radiusList: typing.Sequence[float], - numberList: typing.Sequence[int], - dMax: float = ..., - dMin: float = ..., - indexChange: typing.Sequence[int] = ..., - ) -> BRISK: ... - @classmethod - @typing.overload - def create( - cls, - thresh: int, - octaves: int, - radiusList: typing.Sequence[float], - numberList: typing.Sequence[int], - dMax: float = ..., - dMin: float = ..., - indexChange: typing.Sequence[int] = ..., - ) -> BRISK: ... - def getDefaultName(self) -> str: ... - def setThreshold(self, threshold: int) -> None: ... - def getThreshold(self) -> int: ... - def setOctaves(self, octaves: int) -> None: ... - def getOctaves(self) -> int: ... - def setPatternScale(self, patternScale: float) -> None: ... - def getPatternScale(self) -> float: ... - -class ORB(Feature2D): - # Functions - @classmethod - def create( - cls, - nfeatures: int = ..., - scaleFactor: float = ..., - nlevels: int = ..., - edgeThreshold: int = ..., - firstLevel: int = ..., - WTA_K: int = ..., - scoreType: ORB_ScoreType = ..., - patchSize: int = ..., - fastThreshold: int = ..., - ) -> ORB: ... - def setMaxFeatures(self, maxFeatures: int) -> None: ... - def getMaxFeatures(self) -> int: ... - def setScaleFactor(self, scaleFactor: float) -> None: ... - def getScaleFactor(self) -> float: ... - def setNLevels(self, nlevels: int) -> None: ... - def getNLevels(self) -> int: ... - def setEdgeThreshold(self, edgeThreshold: int) -> None: ... - def getEdgeThreshold(self) -> int: ... - def setFirstLevel(self, firstLevel: int) -> None: ... - def getFirstLevel(self) -> int: ... - def setWTA_K(self, wta_k: int) -> None: ... - def getWTA_K(self) -> int: ... - def setScoreType(self, scoreType: ORB_ScoreType) -> None: ... - def getScoreType(self) -> ORB_ScoreType: ... 
- def setPatchSize(self, patchSize: int) -> None: ... - def getPatchSize(self) -> int: ... - def setFastThreshold(self, fastThreshold: int) -> None: ... - def getFastThreshold(self) -> int: ... - def getDefaultName(self) -> str: ... - -class MSER(Feature2D): - # Functions - @classmethod - def create( - cls, - delta: int = ..., - min_area: int = ..., - max_area: int = ..., - max_variation: float = ..., - min_diversity: float = ..., - max_evolution: int = ..., - area_threshold: float = ..., - min_margin: float = ..., - edge_blur_size: int = ..., - ) -> MSER: ... - @typing.overload - def detectRegions( - self, - image: cv2.typing.MatLike, - ) -> tuple[ - typing.Sequence[typing.Sequence[cv2.typing.Point]], - typing.Sequence[cv2.typing.Rect], - ]: ... - @typing.overload - def detectRegions( - self, - image: UMat, - ) -> tuple[ - typing.Sequence[typing.Sequence[cv2.typing.Point]], - typing.Sequence[cv2.typing.Rect], - ]: ... - def setDelta(self, delta: int) -> None: ... - def getDelta(self) -> int: ... - def setMinArea(self, minArea: int) -> None: ... - def getMinArea(self) -> int: ... - def setMaxArea(self, maxArea: int) -> None: ... - def getMaxArea(self) -> int: ... - def setMaxVariation(self, maxVariation: float) -> None: ... - def getMaxVariation(self) -> float: ... - def setMinDiversity(self, minDiversity: float) -> None: ... - def getMinDiversity(self) -> float: ... - def setMaxEvolution(self, maxEvolution: int) -> None: ... - def getMaxEvolution(self) -> int: ... - def setAreaThreshold(self, areaThreshold: float) -> None: ... - def getAreaThreshold(self) -> float: ... - def setMinMargin(self, min_margin: float) -> None: ... - def getMinMargin(self) -> float: ... - def setEdgeBlurSize(self, edge_blur_size: int) -> None: ... - def getEdgeBlurSize(self) -> int: ... - def setPass2Only(self, f: bool) -> None: ... - def getPass2Only(self) -> bool: ... - def getDefaultName(self) -> str: ... - -class FastFeatureDetector(Feature2D): - # Functions - @classmethod - def create( - cls, - threshold: int = ..., - nonmaxSuppression: bool = ..., - type: FastFeatureDetector_DetectorType = ..., - ) -> FastFeatureDetector: ... - def setThreshold(self, threshold: int) -> None: ... - def getThreshold(self) -> int: ... - def setNonmaxSuppression(self, f: bool) -> None: ... - def getNonmaxSuppression(self) -> bool: ... - def setType(self, type: FastFeatureDetector_DetectorType) -> None: ... - def getType(self) -> FastFeatureDetector_DetectorType: ... - def getDefaultName(self) -> str: ... - -class AgastFeatureDetector(Feature2D): - # Functions - @classmethod - def create( - cls, - threshold: int = ..., - nonmaxSuppression: bool = ..., - type: AgastFeatureDetector_DetectorType = ..., - ) -> AgastFeatureDetector: ... - def setThreshold(self, threshold: int) -> None: ... - def getThreshold(self) -> int: ... - def setNonmaxSuppression(self, f: bool) -> None: ... - def getNonmaxSuppression(self) -> bool: ... - def setType(self, type: AgastFeatureDetector_DetectorType) -> None: ... - def getType(self) -> AgastFeatureDetector_DetectorType: ... - def getDefaultName(self) -> str: ... - -class GFTTDetector(Feature2D): - # Functions - @classmethod - @typing.overload - def create( - cls, - maxCorners: int = ..., - qualityLevel: float = ..., - minDistance: float = ..., - blockSize: int = ..., - useHarrisDetector: bool = ..., - k: float = ..., - ) -> GFTTDetector: ... 
- @classmethod - @typing.overload - def create( - cls, - maxCorners: int, - qualityLevel: float, - minDistance: float, - blockSize: int, - gradiantSize: int, - useHarrisDetector: bool = ..., - k: float = ..., - ) -> GFTTDetector: ... - def setMaxFeatures(self, maxFeatures: int) -> None: ... - def getMaxFeatures(self) -> int: ... - def setQualityLevel(self, qlevel: float) -> None: ... - def getQualityLevel(self) -> float: ... - def setMinDistance(self, minDistance: float) -> None: ... - def getMinDistance(self) -> float: ... - def setBlockSize(self, blockSize: int) -> None: ... - def getBlockSize(self) -> int: ... - def setGradientSize(self, gradientSize_: int) -> None: ... - def getGradientSize(self) -> int: ... - def setHarrisDetector(self, val: bool) -> None: ... - def getHarrisDetector(self) -> bool: ... - def setK(self, k: float) -> None: ... - def getK(self) -> float: ... - def getDefaultName(self) -> str: ... - -class SimpleBlobDetector(Feature2D): - # Classes - class Params: - thresholdStep: float - minThreshold: float - maxThreshold: float - minRepeatability: int - minDistBetweenBlobs: float - filterByColor: bool - blobColor: int - filterByArea: bool - minArea: float - maxArea: float - filterByCircularity: bool - minCircularity: float - maxCircularity: float - filterByInertia: bool - minInertiaRatio: float - maxInertiaRatio: float - filterByConvexity: bool - minConvexity: float - maxConvexity: float - collectContours: bool - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: SimpleBlobDetector.Params = ...) -> SimpleBlobDetector: ... - def setParams(self, params: SimpleBlobDetector.Params) -> None: ... - def getParams(self) -> SimpleBlobDetector.Params: ... - def getDefaultName(self) -> str: ... - def getBlobContours(self) -> typing.Sequence[typing.Sequence[cv2.typing.Point]]: ... - -class KAZE(Feature2D): - # Functions - @classmethod - def create( - cls, - extended: bool = ..., - upright: bool = ..., - threshold: float = ..., - nOctaves: int = ..., - nOctaveLayers: int = ..., - diffusivity: KAZE_DiffusivityType = ..., - ) -> KAZE: ... - def setExtended(self, extended: bool) -> None: ... - def getExtended(self) -> bool: ... - def setUpright(self, upright: bool) -> None: ... - def getUpright(self) -> bool: ... - def setThreshold(self, threshold: float) -> None: ... - def getThreshold(self) -> float: ... - def setNOctaves(self, octaves: int) -> None: ... - def getNOctaves(self) -> int: ... - def setNOctaveLayers(self, octaveLayers: int) -> None: ... - def getNOctaveLayers(self) -> int: ... - def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... - def getDiffusivity(self) -> KAZE_DiffusivityType: ... - def getDefaultName(self) -> str: ... - -class AKAZE(Feature2D): - # Functions - @classmethod - def create( - cls, - descriptor_type: AKAZE_DescriptorType = ..., - descriptor_size: int = ..., - descriptor_channels: int = ..., - threshold: float = ..., - nOctaves: int = ..., - nOctaveLayers: int = ..., - diffusivity: KAZE_DiffusivityType = ..., - ) -> AKAZE: ... - def setDescriptorType(self, dtype: AKAZE_DescriptorType) -> None: ... - def getDescriptorType(self) -> AKAZE_DescriptorType: ... - def setDescriptorSize(self, dsize: int) -> None: ... - def getDescriptorSize(self) -> int: ... - def setDescriptorChannels(self, dch: int) -> None: ... - def getDescriptorChannels(self) -> int: ... - def setThreshold(self, threshold: float) -> None: ... - def getThreshold(self) -> float: ... 
- def setNOctaves(self, octaves: int) -> None: ... - def getNOctaves(self) -> int: ... - def setNOctaveLayers(self, octaveLayers: int) -> None: ... - def getNOctaveLayers(self) -> int: ... - def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... - def getDiffusivity(self) -> KAZE_DiffusivityType: ... - def getDefaultName(self) -> str: ... - -class DescriptorMatcher(Algorithm): - # Functions - @typing.overload - def add(self, descriptors: typing.Sequence[cv2.typing.MatLike]) -> None: ... - @typing.overload - def add(self, descriptors: typing.Sequence[UMat]) -> None: ... - def getTrainDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ... - def clear(self) -> None: ... - def empty(self) -> bool: ... - def isMaskSupported(self) -> bool: ... - def train(self) -> None: ... - @typing.overload - def match( - self, - queryDescriptors: cv2.typing.MatLike, - trainDescriptors: cv2.typing.MatLike, - mask: cv2.typing.MatLike | None = ..., - ) -> typing.Sequence[DMatch]: ... - @typing.overload - def match( - self, - queryDescriptors: UMat, - trainDescriptors: UMat, - mask: UMat | None = ..., - ) -> typing.Sequence[DMatch]: ... - @typing.overload - def match( - self, - queryDescriptors: cv2.typing.MatLike, - masks: typing.Sequence[cv2.typing.MatLike] | None = ..., - ) -> typing.Sequence[DMatch]: ... - @typing.overload - def match(self, queryDescriptors: UMat, masks: typing.Sequence[UMat] | None = ...) -> typing.Sequence[DMatch]: ... - @typing.overload - def knnMatch( - self, - queryDescriptors: cv2.typing.MatLike, - trainDescriptors: cv2.typing.MatLike, - k: int, - mask: cv2.typing.MatLike | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def knnMatch( - self, - queryDescriptors: UMat, - trainDescriptors: UMat, - k: int, - mask: UMat | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def knnMatch( - self, - queryDescriptors: cv2.typing.MatLike, - k: int, - masks: typing.Sequence[cv2.typing.MatLike] | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def knnMatch( - self, - queryDescriptors: UMat, - k: int, - masks: typing.Sequence[UMat] | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def radiusMatch( - self, - queryDescriptors: cv2.typing.MatLike, - trainDescriptors: cv2.typing.MatLike, - maxDistance: float, - mask: cv2.typing.MatLike | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def radiusMatch( - self, - queryDescriptors: UMat, - trainDescriptors: UMat, - maxDistance: float, - mask: UMat | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def radiusMatch( - self, - queryDescriptors: cv2.typing.MatLike, - maxDistance: float, - masks: typing.Sequence[cv2.typing.MatLike] | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def radiusMatch( - self, - queryDescriptors: UMat, - maxDistance: float, - masks: typing.Sequence[UMat] | None = ..., - compactResult: bool = ..., - ) -> typing.Sequence[typing.Sequence[DMatch]]: ... - @typing.overload - def write(self, fileName: str) -> None: ... - @typing.overload - def write(self, fs: FileStorage, name: str) -> None: ... - @typing.overload - def read(self, fileName: str) -> None: ... 
- @typing.overload - def read(self, arg1: FileNode) -> None: ... - def clone(self, emptyTrainData: bool = ...) -> DescriptorMatcher: ... - @classmethod - @typing.overload - def create(cls, descriptorMatcherType: str) -> DescriptorMatcher: ... - @classmethod - @typing.overload - def create(cls, matcherType: DescriptorMatcher_MatcherType) -> DescriptorMatcher: ... - -class BOWKMeansTrainer(BOWTrainer): - # Functions - def __init__( - self, - clusterCount: int, - termcrit: cv2.typing.TermCriteria = ..., - attempts: int = ..., - flags: int = ..., - ) -> None: ... - @typing.overload - def cluster(self) -> cv2.typing.MatLike: ... - @typing.overload - def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... - -class StereoMatcher(Algorithm): - # Functions - @typing.overload - def compute( - self, - left: cv2.typing.MatLike, - right: cv2.typing.MatLike, - disparity: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def compute(self, left: UMat, right: UMat, disparity: UMat | None = ...) -> UMat: ... - def getMinDisparity(self) -> int: ... - def setMinDisparity(self, minDisparity: int) -> None: ... - def getNumDisparities(self) -> int: ... - def setNumDisparities(self, numDisparities: int) -> None: ... - def getBlockSize(self) -> int: ... - def setBlockSize(self, blockSize: int) -> None: ... - def getSpeckleWindowSize(self) -> int: ... - def setSpeckleWindowSize(self, speckleWindowSize: int) -> None: ... - def getSpeckleRange(self) -> int: ... - def setSpeckleRange(self, speckleRange: int) -> None: ... - def getDisp12MaxDiff(self) -> int: ... - def setDisp12MaxDiff(self, disp12MaxDiff: int) -> None: ... - -class BaseCascadeClassifier(Algorithm): ... - -class QRCodeDetector(GraphicalCodeDetector): - # Functions - def __init__(self) -> None: ... - def setEpsX(self, epsX: float) -> QRCodeDetector: ... - def setEpsY(self, epsY: float) -> QRCodeDetector: ... - def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> QRCodeDetector: ... - @typing.overload - def decodeCurved( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike, - straight_qrcode: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - ]: ... - @typing.overload - def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... - @typing.overload - def detectAndDecodeCurved( - self, - img: cv2.typing.MatLike, - points: cv2.typing.MatLike | None = ..., - straight_qrcode: cv2.typing.MatLike | None = ..., - ) -> tuple[ - str, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def detectAndDecodeCurved( - self, - img: UMat, - points: UMat | None = ..., - straight_qrcode: UMat | None = ..., - ) -> tuple[ - str, - UMat, - UMat, - ]: ... - -class QRCodeDetectorAruco(GraphicalCodeDetector): - # Classes - class Params: - minModuleSizeInPyramid: float - maxRotation: float - maxModuleSizeMismatch: float - maxTimingPatternMismatch: float - maxPenalties: float - maxColorsMismatch: float - scaleTimingPatternScore: float - - # Functions - def __init__(self) -> None: ... - - # Functions - - @typing.overload - def __init__(self) -> None: ... - @typing.overload - def __init__(self, params: QRCodeDetectorAruco.Params) -> None: ... - def getDetectorParameters(self) -> QRCodeDetectorAruco.Params: ... - def setDetectorParameters(self, params: QRCodeDetectorAruco.Params) -> QRCodeDetectorAruco: ... - def getArucoParameters(self) -> cv2.aruco.DetectorParameters: ... 
- def setArucoParameters(self, params: cv2.aruco.DetectorParameters) -> None: ... - -class BackgroundSubtractor(Algorithm): - # Functions - @typing.overload - def apply( - self, - image: cv2.typing.MatLike, - fgmask: cv2.typing.MatLike | None = ..., - learningRate: float = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... - @typing.overload - def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... - @typing.overload - def getBackgroundImage(self, backgroundImage: UMat | None = ...) -> UMat: ... - -class DenseOpticalFlow(Algorithm): - # Functions - @typing.overload - def calc(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow: cv2.typing.MatLike) -> cv2.typing.MatLike: ... - @typing.overload - def calc(self, I0: UMat, I1: UMat, flow: UMat) -> UMat: ... - def collectGarbage(self) -> None: ... - -class SparseOpticalFlow(Algorithm): - # Functions - @typing.overload - def calc( - self, - prevImg: cv2.typing.MatLike, - nextImg: cv2.typing.MatLike, - prevPts: cv2.typing.MatLike, - nextPts: cv2.typing.MatLike, - status: cv2.typing.MatLike | None = ..., - err: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def calc( - self, - prevImg: UMat, - nextImg: UMat, - prevPts: UMat, - nextPts: UMat, - status: UMat | None = ..., - err: UMat | None = ..., - ) -> tuple[ - UMat, - UMat, - UMat, - ]: ... - -class TrackerMIL(Tracker): - # Classes - class Params: - samplerInitInRadius: float - samplerInitMaxNegNum: int - samplerSearchWinSize: float - samplerTrackInRadius: float - samplerTrackMaxPosNum: int - samplerTrackMaxNegNum: int - featureSetNumFeatures: int - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: TrackerMIL.Params = ...) -> TrackerMIL: ... - -class TrackerGOTURN(Tracker): - # Classes - class Params: - modelTxt: str - modelBin: str - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: TrackerGOTURN.Params = ...) -> TrackerGOTURN: ... - -class TrackerDaSiamRPN(Tracker): - # Classes - class Params: - model: str - kernel_cls1: str - kernel_r1: str - backend: int - target: int - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ... - def getTrackingScore(self) -> float: ... - -class TrackerNano(Tracker): - # Classes - class Params: - backbone: str - neckhead: str - backend: int - target: int - - # Functions - def __init__(self) -> None: ... - - # Functions - - @classmethod - def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ... - def getTrackingScore(self) -> float: ... - -class error(Exception): - code: int - err: str - file: str - func: str - line: int - msg: str - -class GeneralizedHoughBallard(GeneralizedHough): - # Functions - def setLevels(self, levels: int) -> None: ... - def getLevels(self) -> int: ... - def setVotesThreshold(self, votesThreshold: int) -> None: ... - def getVotesThreshold(self) -> int: ... - -class GeneralizedHoughGuil(GeneralizedHough): - # Functions - def setXi(self, xi: float) -> None: ... - def getXi(self) -> float: ... - def setLevels(self, levels: int) -> None: ... - def getLevels(self) -> int: ... - def setAngleEpsilon(self, angleEpsilon: float) -> None: ... 
- def getAngleEpsilon(self) -> float: ... - def setMinAngle(self, minAngle: float) -> None: ... - def getMinAngle(self) -> float: ... - def setMaxAngle(self, maxAngle: float) -> None: ... - def getMaxAngle(self) -> float: ... - def setAngleStep(self, angleStep: float) -> None: ... - def getAngleStep(self) -> float: ... - def setAngleThresh(self, angleThresh: int) -> None: ... - def getAngleThresh(self) -> int: ... - def setMinScale(self, minScale: float) -> None: ... - def getMinScale(self) -> float: ... - def setMaxScale(self, maxScale: float) -> None: ... - def getMaxScale(self) -> float: ... - def setScaleStep(self, scaleStep: float) -> None: ... - def getScaleStep(self) -> float: ... - def setScaleThresh(self, scaleThresh: int) -> None: ... - def getScaleThresh(self) -> int: ... - def setPosThresh(self, posThresh: int) -> None: ... - def getPosThresh(self) -> int: ... - -class TonemapDrago(Tonemap): - # Functions - def getSaturation(self) -> float: ... - def setSaturation(self, saturation: float) -> None: ... - def getBias(self) -> float: ... - def setBias(self, bias: float) -> None: ... - -class TonemapReinhard(Tonemap): - # Functions - def getIntensity(self) -> float: ... - def setIntensity(self, intensity: float) -> None: ... - def getLightAdaptation(self) -> float: ... - def setLightAdaptation(self, light_adapt: float) -> None: ... - def getColorAdaptation(self) -> float: ... - def setColorAdaptation(self, color_adapt: float) -> None: ... - -class TonemapMantiuk(Tonemap): - # Functions - def getScale(self) -> float: ... - def setScale(self, scale: float) -> None: ... - def getSaturation(self) -> float: ... - def setSaturation(self, saturation: float) -> None: ... - -class AlignMTB(AlignExposures): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - dst: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - ) -> None: ... - @typing.overload - def process( - self, - src: typing.Sequence[UMat], - dst: typing.Sequence[cv2.typing.MatLike], - times: UMat, - response: UMat, - ) -> None: ... - @typing.overload - def process(self, src: typing.Sequence[cv2.typing.MatLike], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ... - @typing.overload - def calculateShift(self, img0: cv2.typing.MatLike, img1: cv2.typing.MatLike) -> cv2.typing.Point: ... - @typing.overload - def calculateShift(self, img0: UMat, img1: UMat) -> cv2.typing.Point: ... - @typing.overload - def shiftMat( - self, - src: cv2.typing.MatLike, - shift: cv2.typing.Point, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def shiftMat(self, src: UMat, shift: cv2.typing.Point, dst: UMat | None = ...) -> UMat: ... - @typing.overload - def computeBitmaps( - self, - img: cv2.typing.MatLike, - tb: cv2.typing.MatLike | None = ..., - eb: cv2.typing.MatLike | None = ..., - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def computeBitmaps(self, img: UMat, tb: UMat | None = ..., eb: UMat | None = ...) -> tuple[UMat, UMat]: ... - def getMaxBits(self) -> int: ... - def setMaxBits(self, max_bits: int) -> None: ... - def getExcludeRange(self) -> int: ... - def setExcludeRange(self, exclude_range: int) -> None: ... - def getCut(self) -> bool: ... - def setCut(self, value: bool) -> None: ... 
- -class CalibrateDebevec(CalibrateCRF): - # Functions - def getLambda(self) -> float: ... - def setLambda(self, lambda_: float) -> None: ... - def getSamples(self) -> int: ... - def setSamples(self, samples: int) -> None: ... - def getRandom(self) -> bool: ... - def setRandom(self, random: bool) -> None: ... - -class CalibrateRobertson(CalibrateCRF): - # Functions - def getMaxIter(self) -> int: ... - def setMaxIter(self, max_iter: int) -> None: ... - def getThreshold(self) -> float: ... - def setThreshold(self, threshold: float) -> None: ... - def getRadiance(self) -> cv2.typing.MatLike: ... - -class MergeDebevec(MergeExposures): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... - -class MergeMertens(MergeExposures): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... - def getContrastWeight(self) -> float: ... - def setContrastWeight(self, contrast_weiht: float) -> None: ... - def getSaturationWeight(self) -> float: ... - def setSaturationWeight(self, saturation_weight: float) -> None: ... - def getExposureWeight(self) -> float: ... - def setExposureWeight(self, exposure_weight: float) -> None: ... - -class MergeRobertson(MergeExposures): - # Functions - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - response: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... - @typing.overload - def process( - self, - src: typing.Sequence[cv2.typing.MatLike], - times: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... - -class BFMatcher(DescriptorMatcher): - # Functions - def __init__(self, normType: int = ..., crossCheck: bool = ...) -> None: ... - @classmethod - def create(cls, normType: int = ..., crossCheck: bool = ...) -> BFMatcher: ... - -class FlannBasedMatcher(DescriptorMatcher): - # Functions - def __init__( - self, - indexParams: cv2.typing.IndexParams = ..., - searchParams: cv2.typing.SearchParams = ..., - ) -> None: ... - @classmethod - def create(cls) -> FlannBasedMatcher: ... 
- -class StereoBM(StereoMatcher): - # Functions - def getPreFilterType(self) -> int: ... - def setPreFilterType(self, preFilterType: int) -> None: ... - def getPreFilterSize(self) -> int: ... - def setPreFilterSize(self, preFilterSize: int) -> None: ... - def getPreFilterCap(self) -> int: ... - def setPreFilterCap(self, preFilterCap: int) -> None: ... - def getTextureThreshold(self) -> int: ... - def setTextureThreshold(self, textureThreshold: int) -> None: ... - def getUniquenessRatio(self) -> int: ... - def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... - def getSmallerBlockSize(self) -> int: ... - def setSmallerBlockSize(self, blockSize: int) -> None: ... - def getROI1(self) -> cv2.typing.Rect: ... - def setROI1(self, roi1: cv2.typing.Rect) -> None: ... - def getROI2(self) -> cv2.typing.Rect: ... - def setROI2(self, roi2: cv2.typing.Rect) -> None: ... - @classmethod - def create(cls, numDisparities: int = ..., blockSize: int = ...) -> StereoBM: ... - -class StereoSGBM(StereoMatcher): - # Functions - def getPreFilterCap(self) -> int: ... - def setPreFilterCap(self, preFilterCap: int) -> None: ... - def getUniquenessRatio(self) -> int: ... - def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... - def getP1(self) -> int: ... - def setP1(self, P1: int) -> None: ... - def getP2(self) -> int: ... - def setP2(self, P2: int) -> None: ... - def getMode(self) -> int: ... - def setMode(self, mode: int) -> None: ... - @classmethod - def create( - cls, - minDisparity: int = ..., - numDisparities: int = ..., - blockSize: int = ..., - P1: int = ..., - P2: int = ..., - disp12MaxDiff: int = ..., - preFilterCap: int = ..., - uniquenessRatio: int = ..., - speckleWindowSize: int = ..., - speckleRange: int = ..., - mode: int = ..., - ) -> StereoSGBM: ... - -class BackgroundSubtractorMOG2(BackgroundSubtractor): - # Functions - def getHistory(self) -> int: ... - def setHistory(self, history: int) -> None: ... - def getNMixtures(self) -> int: ... - def setNMixtures(self, nmixtures: int) -> None: ... - def getBackgroundRatio(self) -> float: ... - def setBackgroundRatio(self, ratio: float) -> None: ... - def getVarThreshold(self) -> float: ... - def setVarThreshold(self, varThreshold: float) -> None: ... - def getVarThresholdGen(self) -> float: ... - def setVarThresholdGen(self, varThresholdGen: float) -> None: ... - def getVarInit(self) -> float: ... - def setVarInit(self, varInit: float) -> None: ... - def getVarMin(self) -> float: ... - def setVarMin(self, varMin: float) -> None: ... - def getVarMax(self) -> float: ... - def setVarMax(self, varMax: float) -> None: ... - def getComplexityReductionThreshold(self) -> float: ... - def setComplexityReductionThreshold(self, ct: float) -> None: ... - def getDetectShadows(self) -> bool: ... - def setDetectShadows(self, detectShadows: bool) -> None: ... - def getShadowValue(self) -> int: ... - def setShadowValue(self, value: int) -> None: ... - def getShadowThreshold(self) -> float: ... - def setShadowThreshold(self, threshold: float) -> None: ... - @typing.overload - def apply( - self, - image: cv2.typing.MatLike, - fgmask: cv2.typing.MatLike | None = ..., - learningRate: float = ..., - ) -> cv2.typing.MatLike: ... - @typing.overload - def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... - -class BackgroundSubtractorKNN(BackgroundSubtractor): - # Functions - def getHistory(self) -> int: ... - def setHistory(self, history: int) -> None: ... - def getNSamples(self) -> int: ... 
- def setNSamples(self, _nN: int) -> None: ... - def getDist2Threshold(self) -> float: ... - def setDist2Threshold(self, _dist2Threshold: float) -> None: ... - def getkNNSamples(self) -> int: ... - def setkNNSamples(self, _nkNN: int) -> None: ... - def getDetectShadows(self) -> bool: ... - def setDetectShadows(self, detectShadows: bool) -> None: ... - def getShadowValue(self) -> int: ... - def setShadowValue(self, value: int) -> None: ... - def getShadowThreshold(self) -> float: ... - def setShadowThreshold(self, threshold: float) -> None: ... - -class FarnebackOpticalFlow(DenseOpticalFlow): - # Functions - def getNumLevels(self) -> int: ... - def setNumLevels(self, numLevels: int) -> None: ... - def getPyrScale(self) -> float: ... - def setPyrScale(self, pyrScale: float) -> None: ... - def getFastPyramids(self) -> bool: ... - def setFastPyramids(self, fastPyramids: bool) -> None: ... - def getWinSize(self) -> int: ... - def setWinSize(self, winSize: int) -> None: ... - def getNumIters(self) -> int: ... - def setNumIters(self, numIters: int) -> None: ... - def getPolyN(self) -> int: ... - def setPolyN(self, polyN: int) -> None: ... - def getPolySigma(self) -> float: ... - def setPolySigma(self, polySigma: float) -> None: ... - def getFlags(self) -> int: ... - def setFlags(self, flags: int) -> None: ... - @classmethod - def create( - cls, - numLevels: int = ..., - pyrScale: float = ..., - fastPyramids: bool = ..., - winSize: int = ..., - numIters: int = ..., - polyN: int = ..., - polySigma: float = ..., - flags: int = ..., - ) -> FarnebackOpticalFlow: ... - -class VariationalRefinement(DenseOpticalFlow): - # Functions - @typing.overload - def calcUV( - self, - I0: cv2.typing.MatLike, - I1: cv2.typing.MatLike, - flow_u: cv2.typing.MatLike, - flow_v: cv2.typing.MatLike, - ) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - ]: ... - @typing.overload - def calcUV(self, I0: UMat, I1: UMat, flow_u: UMat, flow_v: UMat) -> tuple[UMat, UMat]: ... - def getFixedPointIterations(self) -> int: ... - def setFixedPointIterations(self, val: int) -> None: ... - def getSorIterations(self) -> int: ... - def setSorIterations(self, val: int) -> None: ... - def getOmega(self) -> float: ... - def setOmega(self, val: float) -> None: ... - def getAlpha(self) -> float: ... - def setAlpha(self, val: float) -> None: ... - def getDelta(self) -> float: ... - def setDelta(self, val: float) -> None: ... - def getGamma(self) -> float: ... - def setGamma(self, val: float) -> None: ... - @classmethod - def create(cls) -> VariationalRefinement: ... - -class DISOpticalFlow(DenseOpticalFlow): - # Functions - def getFinestScale(self) -> int: ... - def setFinestScale(self, val: int) -> None: ... - def getPatchSize(self) -> int: ... - def setPatchSize(self, val: int) -> None: ... - def getPatchStride(self) -> int: ... - def setPatchStride(self, val: int) -> None: ... - def getGradientDescentIterations(self) -> int: ... - def setGradientDescentIterations(self, val: int) -> None: ... - def getVariationalRefinementIterations(self) -> int: ... - def setVariationalRefinementIterations(self, val: int) -> None: ... - def getVariationalRefinementAlpha(self) -> float: ... - def setVariationalRefinementAlpha(self, val: float) -> None: ... - def getVariationalRefinementDelta(self) -> float: ... - def setVariationalRefinementDelta(self, val: float) -> None: ... - def getVariationalRefinementGamma(self) -> float: ... - def setVariationalRefinementGamma(self, val: float) -> None: ... - def getUseMeanNormalization(self) -> bool: ... 
- def setUseMeanNormalization(self, val: bool) -> None: ... - def getUseSpatialPropagation(self) -> bool: ... - def setUseSpatialPropagation(self, val: bool) -> None: ... - @classmethod - def create(cls, preset: int = ...) -> DISOpticalFlow: ... - -class SparsePyrLKOpticalFlow(SparseOpticalFlow): - # Functions - def getWinSize(self) -> cv2.typing.Size: ... - def setWinSize(self, winSize: cv2.typing.Size) -> None: ... - def getMaxLevel(self) -> int: ... - def setMaxLevel(self, maxLevel: int) -> None: ... - def getTermCriteria(self) -> cv2.typing.TermCriteria: ... - def setTermCriteria(self, crit: cv2.typing.TermCriteria) -> None: ... - def getFlags(self) -> int: ... - def setFlags(self, flags: int) -> None: ... - def getMinEigThreshold(self) -> float: ... - def setMinEigThreshold(self, minEigThreshold: float) -> None: ... - @classmethod - def create( - cls, - winSize: cv2.typing.Size = ..., - maxLevel: int = ..., - crit: cv2.typing.TermCriteria = ..., - flags: int = ..., - minEigThreshold: float = ..., - ) -> SparsePyrLKOpticalFlow: ... - -# Functions -@typing.overload -def CamShift( - probImage: cv2.typing.MatLike, - window: cv2.typing.Rect, - criteria: cv2.typing.TermCriteria, -) -> tuple[ - cv2.typing.RotatedRect, - cv2.typing.Rect, -]: ... -@typing.overload -def CamShift( - probImage: UMat, - window: cv2.typing.Rect, - criteria: cv2.typing.TermCriteria, -) -> tuple[ - cv2.typing.RotatedRect, - cv2.typing.Rect, -]: ... -@typing.overload -def Canny( - image: cv2.typing.MatLike, - threshold1: float, - threshold2: float, - edges: cv2.typing.MatLike | None = ..., - apertureSize: int = ..., - L2gradient: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def Canny( - image: UMat, - threshold1: float, - threshold2: float, - edges: UMat | None = ..., - apertureSize: int = ..., - L2gradient: bool = ..., -) -> UMat: ... -@typing.overload -def Canny( - dx: cv2.typing.MatLike, - dy: cv2.typing.MatLike, - threshold1: float, - threshold2: float, - edges: cv2.typing.MatLike | None = ..., - L2gradient: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def Canny( - dx: UMat, - dy: UMat, - threshold1: float, - threshold2: float, - edges: UMat | None = ..., - L2gradient: bool = ..., -) -> UMat: ... -@typing.overload -def EMD( - signature1: cv2.typing.MatLike, - signature2: cv2.typing.MatLike, - distType: int, - cost: cv2.typing.MatLike | None = ..., - lowerBound: float | None = ..., - flow: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def EMD( - signature1: UMat, - signature2: UMat, - distType: int, - cost: UMat | None = ..., - lowerBound: float | None = ..., - flow: UMat | None = ..., -) -> tuple[ - float, - float, - UMat, -]: ... -@typing.overload -def GaussianBlur( - src: cv2.typing.MatLike, - ksize: cv2.typing.Size, - sigmaX: float, - dst: cv2.typing.MatLike | None = ..., - sigmaY: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def GaussianBlur( - src: UMat, - ksize: cv2.typing.Size, - sigmaX: float, - dst: UMat | None = ..., - sigmaY: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def HoughCircles( - image: cv2.typing.MatLike, - method: int, - dp: float, - minDist: float, - circles: cv2.typing.MatLike | None = ..., - param1: float = ..., - param2: float = ..., - minRadius: int = ..., - maxRadius: int = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def HoughCircles( - image: UMat, - method: int, - dp: float, - minDist: float, - circles: UMat | None = ..., - param1: float = ..., - param2: float = ..., - minRadius: int = ..., - maxRadius: int = ..., -) -> UMat: ... -@typing.overload -def HoughLines( - image: cv2.typing.MatLike, - rho: float, - theta: float, - threshold: int, - lines: cv2.typing.MatLike | None = ..., - srn: float = ..., - stn: float = ..., - min_theta: float = ..., - max_theta: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def HoughLines( - image: UMat, - rho: float, - theta: float, - threshold: int, - lines: UMat | None = ..., - srn: float = ..., - stn: float = ..., - min_theta: float = ..., - max_theta: float = ..., -) -> UMat: ... -@typing.overload -def HoughLinesP( - image: cv2.typing.MatLike, - rho: float, - theta: float, - threshold: int, - lines: cv2.typing.MatLike | None = ..., - minLineLength: float = ..., - maxLineGap: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def HoughLinesP( - image: UMat, - rho: float, - theta: float, - threshold: int, - lines: UMat | None = ..., - minLineLength: float = ..., - maxLineGap: float = ..., -) -> UMat: ... -@typing.overload -def HoughLinesPointSet( - point: cv2.typing.MatLike, - lines_max: int, - threshold: int, - min_rho: float, - max_rho: float, - rho_step: float, - min_theta: float, - max_theta: float, - theta_step: float, - lines: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def HoughLinesPointSet( - point: UMat, - lines_max: int, - threshold: int, - min_rho: float, - max_rho: float, - rho_step: float, - min_theta: float, - max_theta: float, - theta_step: float, - lines: UMat | None = ..., -) -> UMat: ... -@typing.overload -def HoughLinesWithAccumulator( - image: cv2.typing.MatLike, - rho: float, - theta: float, - threshold: int, - lines: cv2.typing.MatLike | None = ..., - srn: float = ..., - stn: float = ..., - min_theta: float = ..., - max_theta: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def HoughLinesWithAccumulator( - image: UMat, - rho: float, - theta: float, - threshold: int, - lines: UMat | None = ..., - srn: float = ..., - stn: float = ..., - min_theta: float = ..., - max_theta: float = ..., -) -> UMat: ... -@typing.overload -def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def HuMoments(m: cv2.typing.Moments, hu: UMat | None = ...) -> UMat: ... -@typing.overload -def LUT( - src: cv2.typing.MatLike, - lut: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def LUT(src: UMat, lut: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def Laplacian( - src: cv2.typing.MatLike, - ddepth: int, - dst: cv2.typing.MatLike | None = ..., - ksize: int = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def Laplacian( - src: UMat, - ddepth: int, - dst: UMat | None = ..., - ksize: int = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def Mahalanobis(v1: cv2.typing.MatLike, v2: cv2.typing.MatLike, icovar: cv2.typing.MatLike) -> float: ... -@typing.overload -def Mahalanobis(v1: UMat, v2: UMat, icovar: UMat) -> float: ... 
-@typing.overload -def PCABackProject( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - eigenvectors: cv2.typing.MatLike, - result: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def PCABackProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... -@typing.overload -def PCACompute( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - eigenvectors: cv2.typing.MatLike | None = ..., - maxComponents: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def PCACompute( - data: UMat, - mean: UMat, - eigenvectors: UMat | None = ..., - maxComponents: int = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def PCACompute( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - retainedVariance: float, - eigenvectors: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def PCACompute( - data: UMat, - mean: UMat, - retainedVariance: float, - eigenvectors: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def PCACompute2( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - eigenvectors: cv2.typing.MatLike | None = ..., - eigenvalues: cv2.typing.MatLike | None = ..., - maxComponents: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def PCACompute2( - data: UMat, - mean: UMat, - eigenvectors: UMat | None = ..., - eigenvalues: UMat | None = ..., - maxComponents: int = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... -@typing.overload -def PCACompute2( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - retainedVariance: float, - eigenvectors: cv2.typing.MatLike | None = ..., - eigenvalues: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def PCACompute2( - data: UMat, - mean: UMat, - retainedVariance: float, - eigenvectors: UMat | None = ..., - eigenvalues: UMat | None = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... -@typing.overload -def PCAProject( - data: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - eigenvectors: cv2.typing.MatLike, - result: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def PCAProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... -@typing.overload -def PSNR(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, R: float = ...) -> float: ... -@typing.overload -def PSNR(src1: UMat, src2: UMat, R: float = ...) -> float: ... -@typing.overload -def RQDecomp3x3( - src: cv2.typing.MatLike, - mtxR: cv2.typing.MatLike | None = ..., - mtxQ: cv2.typing.MatLike | None = ..., - Qx: cv2.typing.MatLike | None = ..., - Qy: cv2.typing.MatLike | None = ..., - Qz: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.Vec3d, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def RQDecomp3x3( - src: UMat, - mtxR: UMat | None = ..., - mtxQ: UMat | None = ..., - Qx: UMat | None = ..., - Qy: UMat | None = ..., - Qz: UMat | None = ..., -) -> tuple[ - cv2.typing.Vec3d, - UMat, - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def Rodrigues( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - jacobian: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... 
-@typing.overload -def Rodrigues(src: UMat, dst: UMat | None = ..., jacobian: UMat | None = ...) -> tuple[UMat, UMat]: ... -@typing.overload -def SVBackSubst( - w: cv2.typing.MatLike, - u: cv2.typing.MatLike, - vt: cv2.typing.MatLike, - rhs: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def SVBackSubst(w: UMat, u: UMat, vt: UMat, rhs: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def SVDecomp( - src: cv2.typing.MatLike, - w: cv2.typing.MatLike | None = ..., - u: cv2.typing.MatLike | None = ..., - vt: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def SVDecomp( - src: UMat, - w: UMat | None = ..., - u: UMat | None = ..., - vt: UMat | None = ..., - flags: int = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... -@typing.overload -def Scharr( - src: cv2.typing.MatLike, - ddepth: int, - dx: int, - dy: int, - dst: cv2.typing.MatLike | None = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def Scharr( - src: UMat, - ddepth: int, - dx: int, - dy: int, - dst: UMat | None = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def Sobel( - src: cv2.typing.MatLike, - ddepth: int, - dx: int, - dy: int, - dst: cv2.typing.MatLike | None = ..., - ksize: int = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def Sobel( - src: UMat, - ddepth: int, - dx: int, - dy: int, - dst: UMat | None = ..., - ksize: int = ..., - scale: float = ..., - delta: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def absdiff( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def absdiff(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def accumulate( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def accumulate(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... -@typing.overload -def accumulateProduct( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def accumulateProduct(src1: UMat, src2: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... -@typing.overload -def accumulateSquare( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def accumulateSquare(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... -@typing.overload -def accumulateWeighted( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - alpha: float, - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def accumulateWeighted(src: UMat, dst: UMat, alpha: float, mask: UMat | None = ...) -> UMat: ... -@typing.overload -def adaptiveThreshold( - src: cv2.typing.MatLike, - maxValue: float, - adaptiveMethod: int, - thresholdType: int, - blockSize: int, - C: float, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def adaptiveThreshold( - src: UMat, - maxValue: float, - adaptiveMethod: int, - thresholdType: int, - blockSize: int, - C: float, - dst: UMat | None = ..., -) -> UMat: ... -@typing.overload -def add( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def add(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... -def addText( - img: cv2.typing.MatLike, - text: str, - org: cv2.typing.Point, - nameFont: str, - pointSize: int = ..., - color: cv2.typing.Scalar = ..., - weight: int = ..., - style: int = ..., - spacing: int = ..., -) -> None: ... -@typing.overload -def addWeighted( - src1: cv2.typing.MatLike, - alpha: float, - src2: cv2.typing.MatLike, - beta: float, - gamma: float, - dst: cv2.typing.MatLike | None = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def addWeighted( - src1: UMat, - alpha: float, - src2: UMat, - beta: float, - gamma: float, - dst: UMat | None = ..., - dtype: int = ..., -) -> UMat: ... -@typing.overload -def applyColorMap( - src: cv2.typing.MatLike, - colormap: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def applyColorMap(src: UMat, colormap: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def applyColorMap( - src: cv2.typing.MatLike, - userColor: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def applyColorMap(src: UMat, userColor: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def approxPolyDP( - curve: cv2.typing.MatLike, - epsilon: float, - closed: bool, - approxCurve: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def approxPolyDP(curve: UMat, epsilon: float, closed: bool, approxCurve: UMat | None = ...) -> UMat: ... -@typing.overload -def arcLength(curve: cv2.typing.MatLike, closed: bool) -> float: ... -@typing.overload -def arcLength(curve: UMat, closed: bool) -> float: ... -@typing.overload -def arrowedLine( - img: cv2.typing.MatLike, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - line_type: int = ..., - shift: int = ..., - tipLength: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def arrowedLine( - img: UMat, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - line_type: int = ..., - shift: int = ..., - tipLength: float = ..., -) -> UMat: ... -@typing.overload -def batchDistance( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dtype: int, - dist: cv2.typing.MatLike | None = ..., - nidx: cv2.typing.MatLike | None = ..., - normType: int = ..., - K: int = ..., - mask: cv2.typing.MatLike | None = ..., - update: int = ..., - crosscheck: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def batchDistance( - src1: UMat, - src2: UMat, - dtype: int, - dist: UMat | None = ..., - nidx: UMat | None = ..., - normType: int = ..., - K: int = ..., - mask: UMat | None = ..., - update: int = ..., - crosscheck: bool = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def bilateralFilter( - src: cv2.typing.MatLike, - d: int, - sigmaColor: float, - sigmaSpace: float, - dst: cv2.typing.MatLike | None = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def bilateralFilter( - src: UMat, - d: int, - sigmaColor: float, - sigmaSpace: float, - dst: UMat | None = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def bitwise_and( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def bitwise_and(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... -@typing.overload -def bitwise_not( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def bitwise_not(src: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... -@typing.overload -def bitwise_or( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def bitwise_or(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... -@typing.overload -def bitwise_xor( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def bitwise_xor(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... -@typing.overload -def blendLinear( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - weights1: cv2.typing.MatLike, - weights2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def blendLinear(src1: UMat, src2: UMat, weights1: UMat, weights2: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def blur( - src: cv2.typing.MatLike, - ksize: cv2.typing.Size, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def blur( - src: UMat, - ksize: cv2.typing.Size, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - borderType: int = ..., -) -> UMat: ... -def borderInterpolate(p: int, len: int, borderType: int) -> int: ... -@typing.overload -def boundingRect(array: cv2.typing.MatLike) -> cv2.typing.Rect: ... -@typing.overload -def boundingRect(array: UMat) -> cv2.typing.Rect: ... -@typing.overload -def boxFilter( - src: cv2.typing.MatLike, - ddepth: int, - ksize: cv2.typing.Size, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - normalize: bool = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def boxFilter( - src: UMat, - ddepth: int, - ksize: cv2.typing.Size, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - normalize: bool = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def boxPoints(box: cv2.typing.RotatedRect, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def boxPoints(box: cv2.typing.RotatedRect, points: UMat | None = ...) -> UMat: ... -@typing.overload -def buildOpticalFlowPyramid( - img: cv2.typing.MatLike, - winSize: cv2.typing.Size, - maxLevel: int, - pyramid: typing.Sequence[cv2.typing.MatLike] | None = ..., - withDerivatives: bool = ..., - pyrBorder: int = ..., - derivBorder: int = ..., - tryReuseInputImage: bool = ..., -) -> tuple[ - int, - typing.Sequence[cv2.typing.MatLike], -]: ... 
-@typing.overload -def buildOpticalFlowPyramid( - img: UMat, - winSize: cv2.typing.Size, - maxLevel: int, - pyramid: typing.Sequence[UMat] | None = ..., - withDerivatives: bool = ..., - pyrBorder: int = ..., - derivBorder: int = ..., - tryReuseInputImage: bool = ..., -) -> tuple[ - int, - typing.Sequence[UMat], -]: ... -@typing.overload -def calcBackProject( - images: typing.Sequence[cv2.typing.MatLike], - channels: typing.Sequence[int], - hist: cv2.typing.MatLike, - ranges: typing.Sequence[float], - scale: float, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def calcBackProject( - images: typing.Sequence[UMat], - channels: typing.Sequence[int], - hist: UMat, - ranges: typing.Sequence[float], - scale: float, - dst: UMat | None = ..., -) -> UMat: ... -@typing.overload -def calcCovarMatrix( - samples: cv2.typing.MatLike, - mean: cv2.typing.MatLike, - flags: int, - covar: cv2.typing.MatLike | None = ..., - ctype: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calcCovarMatrix( - samples: UMat, - mean: UMat, - flags: int, - covar: UMat | None = ..., - ctype: int = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def calcHist( - images: typing.Sequence[cv2.typing.MatLike], - channels: typing.Sequence[int], - mask: cv2.typing.MatLike | None, - histSize: typing.Sequence[int], - ranges: typing.Sequence[float], - hist: cv2.typing.MatLike | None = ..., - accumulate: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def calcHist( - images: typing.Sequence[UMat], - channels: typing.Sequence[int], - mask: UMat | None, - histSize: typing.Sequence[int], - ranges: typing.Sequence[float], - hist: UMat | None = ..., - accumulate: bool = ..., -) -> UMat: ... -@typing.overload -def calcOpticalFlowFarneback( - prev: cv2.typing.MatLike, - next: cv2.typing.MatLike, - flow: cv2.typing.MatLike, - pyr_scale: float, - levels: int, - winsize: int, - iterations: int, - poly_n: int, - poly_sigma: float, - flags: int, -) -> cv2.typing.MatLike: ... -@typing.overload -def calcOpticalFlowFarneback( - prev: UMat, - next: UMat, - flow: UMat, - pyr_scale: float, - levels: int, - winsize: int, - iterations: int, - poly_n: int, - poly_sigma: float, - flags: int, -) -> UMat: ... -@typing.overload -def calcOpticalFlowPyrLK( - prevImg: cv2.typing.MatLike, - nextImg: cv2.typing.MatLike, - prevPts: cv2.typing.MatLike, - nextPts: cv2.typing.MatLike, - status: cv2.typing.MatLike | None = ..., - err: cv2.typing.MatLike | None = ..., - winSize: cv2.typing.Size = ..., - maxLevel: int = ..., - criteria: cv2.typing.TermCriteria = ..., - flags: int = ..., - minEigThreshold: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calcOpticalFlowPyrLK( - prevImg: UMat, - nextImg: UMat, - prevPts: UMat, - nextPts: UMat, - status: UMat | None = ..., - err: UMat | None = ..., - winSize: cv2.typing.Size = ..., - maxLevel: int = ..., - criteria: cv2.typing.TermCriteria = ..., - flags: int = ..., - minEigThreshold: float = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... 
-@typing.overload -def calibrateCamera( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def calibrateCamera( - objectPoints: typing.Sequence[UMat], - imagePoints: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - cameraMatrix: UMat, - distCoeffs: UMat, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - typing.Sequence[UMat], - typing.Sequence[UMat], -]: ... -@typing.overload -def calibrateCameraExtended( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., - stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., - perViewErrors: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calibrateCameraExtended( - objectPoints: typing.Sequence[UMat], - imagePoints: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - cameraMatrix: UMat, - distCoeffs: UMat, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - stdDeviationsIntrinsics: UMat | None = ..., - stdDeviationsExtrinsics: UMat | None = ..., - perViewErrors: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - typing.Sequence[UMat], - typing.Sequence[UMat], - UMat, - UMat, - UMat, -]: ... -@typing.overload -def calibrateCameraRO( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - iFixedPoint: int, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - newObjPoints: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, -]: ... 
-@typing.overload -def calibrateCameraRO( - objectPoints: typing.Sequence[UMat], - imagePoints: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - iFixedPoint: int, - cameraMatrix: UMat, - distCoeffs: UMat, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - newObjPoints: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - typing.Sequence[UMat], - typing.Sequence[UMat], - UMat, -]: ... -@typing.overload -def calibrateCameraROExtended( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - iFixedPoint: int, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - newObjPoints: cv2.typing.MatLike | None = ..., - stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., - stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., - stdDeviationsObjPoints: cv2.typing.MatLike | None = ..., - perViewErrors: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calibrateCameraROExtended( - objectPoints: typing.Sequence[UMat], - imagePoints: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - iFixedPoint: int, - cameraMatrix: UMat, - distCoeffs: UMat, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - newObjPoints: UMat | None = ..., - stdDeviationsIntrinsics: UMat | None = ..., - stdDeviationsExtrinsics: UMat | None = ..., - stdDeviationsObjPoints: UMat | None = ..., - perViewErrors: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - typing.Sequence[UMat], - typing.Sequence[UMat], - UMat, - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def calibrateHandEye( - R_gripper2base: typing.Sequence[cv2.typing.MatLike], - t_gripper2base: typing.Sequence[cv2.typing.MatLike], - R_target2cam: typing.Sequence[cv2.typing.MatLike], - t_target2cam: typing.Sequence[cv2.typing.MatLike], - R_cam2gripper: cv2.typing.MatLike | None = ..., - t_cam2gripper: cv2.typing.MatLike | None = ..., - method: HandEyeCalibrationMethod = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calibrateHandEye( - R_gripper2base: typing.Sequence[UMat], - t_gripper2base: typing.Sequence[UMat], - R_target2cam: typing.Sequence[UMat], - t_target2cam: typing.Sequence[UMat], - R_cam2gripper: UMat | None = ..., - t_cam2gripper: UMat | None = ..., - method: HandEyeCalibrationMethod = ..., -) -> tuple[ - UMat, - UMat, -]: ... 
-@typing.overload -def calibrateRobotWorldHandEye( - R_world2cam: typing.Sequence[cv2.typing.MatLike], - t_world2cam: typing.Sequence[cv2.typing.MatLike], - R_base2gripper: typing.Sequence[cv2.typing.MatLike], - t_base2gripper: typing.Sequence[cv2.typing.MatLike], - R_base2world: cv2.typing.MatLike | None = ..., - t_base2world: cv2.typing.MatLike | None = ..., - R_gripper2cam: cv2.typing.MatLike | None = ..., - t_gripper2cam: cv2.typing.MatLike | None = ..., - method: RobotWorldHandEyeCalibrationMethod = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def calibrateRobotWorldHandEye( - R_world2cam: typing.Sequence[UMat], - t_world2cam: typing.Sequence[UMat], - R_base2gripper: typing.Sequence[UMat], - t_base2gripper: typing.Sequence[UMat], - R_base2world: UMat | None = ..., - t_base2world: UMat | None = ..., - R_gripper2cam: UMat | None = ..., - t_gripper2cam: UMat | None = ..., - method: RobotWorldHandEyeCalibrationMethod = ..., -) -> tuple[ - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def calibrationMatrixValues( - cameraMatrix: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - apertureWidth: float, - apertureHeight: float, -) -> tuple[ - float, - float, - float, - cv2.typing.Point2d, - float, -]: ... -@typing.overload -def calibrationMatrixValues( - cameraMatrix: UMat, - imageSize: cv2.typing.Size, - apertureWidth: float, - apertureHeight: float, -) -> tuple[ - float, - float, - float, - cv2.typing.Point2d, - float, -]: ... -@typing.overload -def cartToPolar( - x: cv2.typing.MatLike, - y: cv2.typing.MatLike, - magnitude: cv2.typing.MatLike | None = ..., - angle: cv2.typing.MatLike | None = ..., - angleInDegrees: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def cartToPolar( - x: UMat, - y: UMat, - magnitude: UMat | None = ..., - angle: UMat | None = ..., - angleInDegrees: bool = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def checkChessboard(img: cv2.typing.MatLike, size: cv2.typing.Size) -> bool: ... -@typing.overload -def checkChessboard(img: UMat, size: cv2.typing.Size) -> bool: ... -def checkHardwareSupport(feature: int) -> bool: ... -@typing.overload -def checkRange( - a: cv2.typing.MatLike, - quiet: bool = ..., - minVal: float = ..., - maxVal: float = ..., -) -> tuple[ - bool, - cv2.typing.Point, -]: ... -@typing.overload -def checkRange( - a: UMat, - quiet: bool = ..., - minVal: float = ..., - maxVal: float = ..., -) -> tuple[ - bool, - cv2.typing.Point, -]: ... -@typing.overload -def circle( - img: cv2.typing.MatLike, - center: cv2.typing.Point, - radius: int, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def circle( - img: UMat, - center: cv2.typing.Point, - radius: int, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -def clipLine( - imgRect: cv2.typing.Rect, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, -) -> tuple[ - bool, - cv2.typing.Point, - cv2.typing.Point, -]: ... -@typing.overload -def colorChange( - src: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - red_mul: float = ..., - green_mul: float = ..., - blue_mul: float = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def colorChange( - src: UMat, - mask: UMat, - dst: UMat | None = ..., - red_mul: float = ..., - green_mul: float = ..., - blue_mul: float = ..., -) -> UMat: ... -@typing.overload -def compare( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - cmpop: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def compare(src1: UMat, src2: UMat, cmpop: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def compareHist(H1: cv2.typing.MatLike, H2: cv2.typing.MatLike, method: int) -> float: ... -@typing.overload -def compareHist(H1: UMat, H2: UMat, method: int) -> float: ... -@typing.overload -def completeSymm(m: cv2.typing.MatLike, lowerToUpper: bool = ...) -> cv2.typing.MatLike: ... -@typing.overload -def completeSymm(m: UMat, lowerToUpper: bool = ...) -> UMat: ... -@typing.overload -def composeRT( - rvec1: cv2.typing.MatLike, - tvec1: cv2.typing.MatLike, - rvec2: cv2.typing.MatLike, - tvec2: cv2.typing.MatLike, - rvec3: cv2.typing.MatLike | None = ..., - tvec3: cv2.typing.MatLike | None = ..., - dr3dr1: cv2.typing.MatLike | None = ..., - dr3dt1: cv2.typing.MatLike | None = ..., - dr3dr2: cv2.typing.MatLike | None = ..., - dr3dt2: cv2.typing.MatLike | None = ..., - dt3dr1: cv2.typing.MatLike | None = ..., - dt3dt1: cv2.typing.MatLike | None = ..., - dt3dr2: cv2.typing.MatLike | None = ..., - dt3dt2: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def composeRT( - rvec1: UMat, - tvec1: UMat, - rvec2: UMat, - tvec2: UMat, - rvec3: UMat | None = ..., - tvec3: UMat | None = ..., - dr3dr1: UMat | None = ..., - dr3dt1: UMat | None = ..., - dr3dr2: UMat | None = ..., - dr3dt2: UMat | None = ..., - dt3dr1: UMat | None = ..., - dt3dt1: UMat | None = ..., - dt3dr2: UMat | None = ..., - dt3dt2: UMat | None = ..., -) -> tuple[ - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def computeCorrespondEpilines( - points: cv2.typing.MatLike, - whichImage: int, - F: cv2.typing.MatLike, - lines: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def computeCorrespondEpilines(points: UMat, whichImage: int, F: UMat, lines: UMat | None = ...) -> UMat: ... -@typing.overload -def computeECC( - templateImage: cv2.typing.MatLike, - inputImage: cv2.typing.MatLike, - inputMask: cv2.typing.MatLike | None = ..., -) -> float: ... -@typing.overload -def computeECC(templateImage: UMat, inputImage: UMat, inputMask: UMat | None = ...) -> float: ... -@typing.overload -def connectedComponents( - image: cv2.typing.MatLike, - labels: cv2.typing.MatLike | None = ..., - connectivity: int = ..., - ltype: int = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... -@typing.overload -def connectedComponents( - image: UMat, - labels: UMat | None = ..., - connectivity: int = ..., - ltype: int = ..., -) -> tuple[ - int, - UMat, -]: ... -@typing.overload -def connectedComponentsWithAlgorithm( - image: cv2.typing.MatLike, - connectivity: int, - ltype: int, - ccltype: int, - labels: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... 
-@typing.overload -def connectedComponentsWithAlgorithm( - image: UMat, - connectivity: int, - ltype: int, - ccltype: int, - labels: UMat | None = ..., -) -> tuple[ - int, - UMat, -]: ... -@typing.overload -def connectedComponentsWithStats( - image: cv2.typing.MatLike, - labels: cv2.typing.MatLike | None = ..., - stats: cv2.typing.MatLike | None = ..., - centroids: cv2.typing.MatLike | None = ..., - connectivity: int = ..., - ltype: int = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def connectedComponentsWithStats( - image: UMat, - labels: UMat | None = ..., - stats: UMat | None = ..., - centroids: UMat | None = ..., - connectivity: int = ..., - ltype: int = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def connectedComponentsWithStatsWithAlgorithm( - image: cv2.typing.MatLike, - connectivity: int, - ltype: int, - ccltype: int, - labels: cv2.typing.MatLike | None = ..., - stats: cv2.typing.MatLike | None = ..., - centroids: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def connectedComponentsWithStatsWithAlgorithm( - image: UMat, - connectivity: int, - ltype: int, - ccltype: int, - labels: UMat | None = ..., - stats: UMat | None = ..., - centroids: UMat | None = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def contourArea(contour: cv2.typing.MatLike, oriented: bool = ...) -> float: ... -@typing.overload -def contourArea(contour: UMat, oriented: bool = ...) -> float: ... -@typing.overload -def convertFp16(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def convertFp16(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def convertMaps( - map1: cv2.typing.MatLike, - map2: cv2.typing.MatLike, - dstmap1type: int, - dstmap1: cv2.typing.MatLike | None = ..., - dstmap2: cv2.typing.MatLike | None = ..., - nninterpolation: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def convertMaps( - map1: UMat, - map2: UMat, - dstmap1type: int, - dstmap1: UMat | None = ..., - dstmap2: UMat | None = ..., - nninterpolation: bool = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def convertPointsFromHomogeneous( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def convertPointsFromHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def convertPointsToHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def convertPointsToHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def convertScaleAbs( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - alpha: float = ..., - beta: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def convertScaleAbs(src: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ... -@typing.overload -def convexHull( - points: cv2.typing.MatLike, - hull: cv2.typing.MatLike | None = ..., - clockwise: bool = ..., - returnPoints: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def convexHull(points: UMat, hull: UMat | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> UMat: ... 
-@typing.overload -def convexityDefects( - contour: cv2.typing.MatLike, - convexhull: cv2.typing.MatLike, - convexityDefects: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def convexityDefects(contour: UMat, convexhull: UMat, convexityDefects: UMat | None = ...) -> UMat: ... -@typing.overload -def copyMakeBorder( - src: cv2.typing.MatLike, - top: int, - bottom: int, - left: int, - right: int, - borderType: int, - dst: cv2.typing.MatLike | None = ..., - value: cv2.typing.Scalar = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def copyMakeBorder( - src: UMat, - top: int, - bottom: int, - left: int, - right: int, - borderType: int, - dst: UMat | None = ..., - value: cv2.typing.Scalar = ..., -) -> UMat: ... -@typing.overload -def copyTo( - src: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def copyTo(src: UMat, mask: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def cornerEigenValsAndVecs( - src: cv2.typing.MatLike, - blockSize: int, - ksize: int, - dst: cv2.typing.MatLike | None = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def cornerEigenValsAndVecs( - src: UMat, - blockSize: int, - ksize: int, - dst: UMat | None = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def cornerHarris( - src: cv2.typing.MatLike, - blockSize: int, - ksize: int, - k: float, - dst: cv2.typing.MatLike | None = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def cornerHarris( - src: UMat, - blockSize: int, - ksize: int, - k: float, - dst: UMat | None = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def cornerMinEigenVal( - src: cv2.typing.MatLike, - blockSize: int, - dst: cv2.typing.MatLike | None = ..., - ksize: int = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def cornerMinEigenVal( - src: UMat, - blockSize: int, - dst: UMat | None = ..., - ksize: int = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def cornerSubPix( - image: cv2.typing.MatLike, - corners: cv2.typing.MatLike, - winSize: cv2.typing.Size, - zeroZone: cv2.typing.Size, - criteria: cv2.typing.TermCriteria, -) -> cv2.typing.MatLike: ... -@typing.overload -def cornerSubPix( - image: UMat, - corners: UMat, - winSize: cv2.typing.Size, - zeroZone: cv2.typing.Size, - criteria: cv2.typing.TermCriteria, -) -> UMat: ... -@typing.overload -def correctMatches( - F: cv2.typing.MatLike, - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - newPoints1: cv2.typing.MatLike | None = ..., - newPoints2: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def correctMatches( - F: UMat, - points1: UMat, - points2: UMat, - newPoints1: UMat | None = ..., - newPoints2: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def countNonZero(src: cv2.typing.MatLike) -> int: ... -@typing.overload -def countNonZero(src: UMat) -> int: ... -def createAlignMTB(max_bits: int = ..., exclude_range: int = ..., cut: bool = ...) -> AlignMTB: ... -def createBackgroundSubtractorKNN( - history: int = ..., - dist2Threshold: float = ..., - detectShadows: bool = ..., -) -> BackgroundSubtractorKNN: ... -def createBackgroundSubtractorMOG2( - history: int = ..., - varThreshold: float = ..., - detectShadows: bool = ..., -) -> BackgroundSubtractorMOG2: ... 
-def createCLAHE(clipLimit: float = ..., tileGridSize: cv2.typing.Size = ...) -> CLAHE: ... -def createCalibrateDebevec(samples: int = ..., lambda_: float = ..., random: bool = ...) -> CalibrateDebevec: ... -def createCalibrateRobertson(max_iter: int = ..., threshold: float = ...) -> CalibrateRobertson: ... -def createGeneralizedHoughBallard() -> GeneralizedHoughBallard: ... -def createGeneralizedHoughGuil() -> GeneralizedHoughGuil: ... -@typing.overload -def createHanningWindow( - winSize: cv2.typing.Size, - type: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: UMat | None = ...) -> UMat: ... -def createLineSegmentDetector( - refine: int = ..., - scale: float = ..., - sigma_scale: float = ..., - quant: float = ..., - ang_th: float = ..., - log_eps: float = ..., - density_th: float = ..., - n_bins: int = ..., -) -> LineSegmentDetector: ... -def createMergeDebevec() -> MergeDebevec: ... -def createMergeMertens( - contrast_weight: float = ..., - saturation_weight: float = ..., - exposure_weight: float = ..., -) -> MergeMertens: ... -def createMergeRobertson() -> MergeRobertson: ... -def createTonemap(gamma: float = ...) -> Tonemap: ... -def createTonemapDrago(gamma: float = ..., saturation: float = ..., bias: float = ...) -> TonemapDrago: ... -def createTonemapMantiuk(gamma: float = ..., scale: float = ..., saturation: float = ...) -> TonemapMantiuk: ... -def createTonemapReinhard( - gamma: float = ..., - intensity: float = ..., - light_adapt: float = ..., - color_adapt: float = ..., -) -> TonemapReinhard: ... -def cubeRoot(val: float) -> float: ... -@typing.overload -def cvtColor( - src: cv2.typing.MatLike, - code: int, - dst: cv2.typing.MatLike | None = ..., - dstCn: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def cvtColor(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ... -@typing.overload -def cvtColorTwoPlane( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - code: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def cvtColorTwoPlane(src1: UMat, src2: UMat, code: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def dct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... -@typing.overload -def dct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... -@typing.overload -def decolor( - src: cv2.typing.MatLike, - grayscale: cv2.typing.MatLike | None = ..., - color_boost: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def decolor(src: UMat, grayscale: UMat | None = ..., color_boost: UMat | None = ...) -> tuple[UMat, UMat]: ... -@typing.overload -def decomposeEssentialMat( - E: cv2.typing.MatLike, - R1: cv2.typing.MatLike | None = ..., - R2: cv2.typing.MatLike | None = ..., - t: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def decomposeEssentialMat( - E: UMat, - R1: UMat | None = ..., - R2: UMat | None = ..., - t: UMat | None = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... 
-@typing.overload
-def decomposeHomographyMat(
-    H: cv2.typing.MatLike,
-    K: cv2.typing.MatLike,
-    rotations: typing.Sequence[cv2.typing.MatLike] | None = ...,
-    translations: typing.Sequence[cv2.typing.MatLike] | None = ...,
-    normals: typing.Sequence[cv2.typing.MatLike] | None = ...,
-) -> tuple[
-    int,
-    typing.Sequence[cv2.typing.MatLike],
-    typing.Sequence[cv2.typing.MatLike],
-    typing.Sequence[cv2.typing.MatLike],
-]: ...
-@typing.overload
-def decomposeHomographyMat(
-    H: UMat,
-    K: UMat,
-    rotations: typing.Sequence[UMat] | None = ...,
-    translations: typing.Sequence[UMat] | None = ...,
-    normals: typing.Sequence[UMat] | None = ...,
-) -> tuple[
-    int,
-    typing.Sequence[UMat],
-    typing.Sequence[UMat],
-    typing.Sequence[UMat],
-]: ...
-@typing.overload
-def decomposeProjectionMatrix(
-    projMatrix: cv2.typing.MatLike,
-    cameraMatrix: cv2.typing.MatLike | None = ...,
-    rotMatrix: cv2.typing.MatLike | None = ...,
-    transVect: cv2.typing.MatLike | None = ...,
-    rotMatrixX: cv2.typing.MatLike | None = ...,
-    rotMatrixY: cv2.typing.MatLike | None = ...,
-    rotMatrixZ: cv2.typing.MatLike | None = ...,
-    eulerAngles: cv2.typing.MatLike | None = ...,
-) -> tuple[
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-]: ...
-@typing.overload
-def decomposeProjectionMatrix(
-    projMatrix: UMat,
-    cameraMatrix: UMat | None = ...,
-    rotMatrix: UMat | None = ...,
-    transVect: UMat | None = ...,
-    rotMatrixX: UMat | None = ...,
-    rotMatrixY: UMat | None = ...,
-    rotMatrixZ: UMat | None = ...,
-    eulerAngles: UMat | None = ...,
-) -> tuple[
-    UMat,
-    UMat,
-    UMat,
-    UMat,
-    UMat,
-    UMat,
-    UMat,
-]: ...
-@typing.overload
-def demosaicing(
-    src: cv2.typing.MatLike,
-    code: int,
-    dst: cv2.typing.MatLike | None = ...,
-    dstCn: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def demosaicing(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ...
-def denoise_TVL1(
-    observations: typing.Sequence[cv2.typing.MatLike],
-    result: cv2.typing.MatLike,
-    lambda_: float = ...,
-    niters: int = ...,
-) -> None: ...
-def destroyAllWindows() -> None: ...
-def destroyWindow(winname: str) -> None: ...
-@typing.overload
-def detailEnhance(
-    src: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    sigma_s: float = ...,
-    sigma_r: float = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def detailEnhance(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...
-@typing.overload
-def determinant(mtx: cv2.typing.MatLike) -> float: ...
-@typing.overload
-def determinant(mtx: UMat) -> float: ...
-@typing.overload
-def dft(
-    src: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    flags: int = ...,
-    nonzeroRows: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def dft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ...
-@typing.overload
-def dilate(
-    src: cv2.typing.MatLike,
-    kernel: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    anchor: cv2.typing.Point = ...,
-    iterations: int = ...,
-    borderType: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def dilate(
-    src: UMat,
-    kernel: UMat,
-    dst: UMat | None = ...,
-    anchor: cv2.typing.Point = ...,
-    iterations: int = ...,
-    borderType: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> UMat: ...
-def displayOverlay(winname: str, text: str, delayms: int = ...) -> None: ...
-def displayStatusBar(winname: str, text: str, delayms: int = ...) -> None: ...
-@typing.overload
-def distanceTransform(
-    src: cv2.typing.MatLike,
-    distanceType: int,
-    maskSize: int,
-    dst: cv2.typing.MatLike | None = ...,
-    dstType: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def distanceTransform(
-    src: UMat,
-    distanceType: int,
-    maskSize: int,
-    dst: UMat | None = ...,
-    dstType: int = ...,
-) -> UMat: ...
-@typing.overload
-def distanceTransformWithLabels(
-    src: cv2.typing.MatLike,
-    distanceType: int,
-    maskSize: int,
-    dst: cv2.typing.MatLike | None = ...,
-    labels: cv2.typing.MatLike | None = ...,
-    labelType: int = ...,
-) -> tuple[
-    cv2.typing.MatLike,
-    cv2.typing.MatLike,
-]: ...
-@typing.overload
-def distanceTransformWithLabels(
-    src: UMat,
-    distanceType: int,
-    maskSize: int,
-    dst: UMat | None = ...,
-    labels: UMat | None = ...,
-    labelType: int = ...,
-) -> tuple[
-    UMat,
-    UMat,
-]: ...
-@typing.overload
-def divSpectrums(
-    a: cv2.typing.MatLike,
-    b: cv2.typing.MatLike,
-    flags: int,
-    c: cv2.typing.MatLike | None = ...,
-    conjB: bool = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def divSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ...
-@typing.overload
-def divide(
-    src1: cv2.typing.MatLike,
-    src2: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    scale: float = ...,
-    dtype: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def divide(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
-@typing.overload
-def divide(
-    scale: float,
-    src2: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    dtype: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def divide(scale: float, src2: UMat, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...
-@typing.overload
-def drawChessboardCorners(
-    image: cv2.typing.MatLike,
-    patternSize: cv2.typing.Size,
-    corners: cv2.typing.MatLike,
-    patternWasFound: bool,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def drawChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat, patternWasFound: bool) -> UMat: ...
-@typing.overload
-def drawContours(
-    image: cv2.typing.MatLike,
-    contours: typing.Sequence[cv2.typing.MatLike],
-    contourIdx: int,
-    color: cv2.typing.Scalar,
-    thickness: int = ...,
-    lineType: int = ...,
-    hierarchy: cv2.typing.MatLike | None = ...,
-    maxLevel: int = ...,
-    offset: cv2.typing.Point = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def drawContours(
-    image: UMat,
-    contours: typing.Sequence[UMat],
-    contourIdx: int,
-    color: cv2.typing.Scalar,
-    thickness: int = ...,
-    lineType: int = ...,
-    hierarchy: UMat | None = ...,
-    maxLevel: int = ...,
-    offset: cv2.typing.Point = ...,
-) -> UMat: ...
-@typing.overload
-def drawFrameAxes(
-    image: cv2.typing.MatLike,
-    cameraMatrix: cv2.typing.MatLike,
-    distCoeffs: cv2.typing.MatLike,
-    rvec: cv2.typing.MatLike,
-    tvec: cv2.typing.MatLike,
-    length: float,
-    thickness: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def drawFrameAxes(
-    image: UMat,
-    cameraMatrix: UMat,
-    distCoeffs: UMat,
-    rvec: UMat,
-    tvec: UMat,
-    length: float,
-    thickness: int = ...,
-) -> UMat: ...
-@typing.overload
-def drawKeypoints(
-    image: cv2.typing.MatLike,
-    keypoints: typing.Sequence[KeyPoint],
-    outImage: cv2.typing.MatLike,
-    color: cv2.typing.Scalar = ...,
-    flags: DrawMatchesFlags = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload -def drawKeypoints( - image: UMat, - keypoints: typing.Sequence[KeyPoint], - outImage: UMat, - color: cv2.typing.Scalar = ..., - flags: DrawMatchesFlags = ..., -) -> UMat: ... -@typing.overload -def drawMarker( - img: cv2.typing.MatLike, - position: cv2.typing.Point, - color: cv2.typing.Scalar, - markerType: int = ..., - markerSize: int = ..., - thickness: int = ..., - line_type: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def drawMarker( - img: UMat, - position: cv2.typing.Point, - color: cv2.typing.Scalar, - markerType: int = ..., - markerSize: int = ..., - thickness: int = ..., - line_type: int = ..., -) -> UMat: ... -@typing.overload -def drawMatches( - img1: cv2.typing.MatLike, - keypoints1: typing.Sequence[KeyPoint], - img2: cv2.typing.MatLike, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[DMatch], - outImg: cv2.typing.MatLike, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[str] = ..., - flags: DrawMatchesFlags = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def drawMatches( - img1: UMat, - keypoints1: typing.Sequence[KeyPoint], - img2: UMat, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[DMatch], - outImg: UMat, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[str] = ..., - flags: DrawMatchesFlags = ..., -) -> UMat: ... -@typing.overload -def drawMatches( - img1: cv2.typing.MatLike, - keypoints1: typing.Sequence[KeyPoint], - img2: cv2.typing.MatLike, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[DMatch], - outImg: cv2.typing.MatLike, - matchesThickness: int, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[str] = ..., - flags: DrawMatchesFlags = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def drawMatches( - img1: UMat, - keypoints1: typing.Sequence[KeyPoint], - img2: UMat, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[DMatch], - outImg: UMat, - matchesThickness: int, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[str] = ..., - flags: DrawMatchesFlags = ..., -) -> UMat: ... -@typing.overload -def drawMatchesKnn( - img1: cv2.typing.MatLike, - keypoints1: typing.Sequence[KeyPoint], - img2: cv2.typing.MatLike, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[typing.Sequence[DMatch]], - outImg: cv2.typing.MatLike, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[typing.Sequence[str]] = ..., - flags: DrawMatchesFlags = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def drawMatchesKnn( - img1: UMat, - keypoints1: typing.Sequence[KeyPoint], - img2: UMat, - keypoints2: typing.Sequence[KeyPoint], - matches1to2: typing.Sequence[typing.Sequence[DMatch]], - outImg: UMat, - matchColor: cv2.typing.Scalar = ..., - singlePointColor: cv2.typing.Scalar = ..., - matchesMask: typing.Sequence[typing.Sequence[str]] = ..., - flags: DrawMatchesFlags = ..., -) -> UMat: ... -@typing.overload -def edgePreservingFilter( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - flags: int = ..., - sigma_s: float = ..., - sigma_r: float = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def edgePreservingFilter( - src: UMat, - dst: UMat | None = ..., - flags: int = ..., - sigma_s: float = ..., - sigma_r: float = ..., -) -> UMat: ... -@typing.overload -def eigen( - src: cv2.typing.MatLike, - eigenvalues: cv2.typing.MatLike | None = ..., - eigenvectors: cv2.typing.MatLike | None = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def eigen(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[bool, UMat, UMat]: ... -@typing.overload -def eigenNonSymmetric( - src: cv2.typing.MatLike, - eigenvalues: cv2.typing.MatLike | None = ..., - eigenvectors: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def eigenNonSymmetric( - src: UMat, - eigenvalues: UMat | None = ..., - eigenvectors: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def ellipse( - img: cv2.typing.MatLike, - center: cv2.typing.Point, - axes: cv2.typing.Size, - angle: float, - startAngle: float, - endAngle: float, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def ellipse( - img: UMat, - center: cv2.typing.Point, - axes: cv2.typing.Size, - angle: float, - startAngle: float, - endAngle: float, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -@typing.overload -def ellipse( - img: cv2.typing.MatLike, - box: cv2.typing.RotatedRect, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def ellipse( - img: UMat, - box: cv2.typing.RotatedRect, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., -) -> UMat: ... -def ellipse2Poly( - center: cv2.typing.Point, - axes: cv2.typing.Size, - angle: int, - arcStart: int, - arcEnd: int, - delta: int, -) -> typing.Sequence[cv2.typing.Point]: ... -def empty_array_desc() -> GArrayDesc: ... -def empty_gopaque_desc() -> GOpaqueDesc: ... -def empty_scalar_desc() -> GScalarDesc: ... -@typing.overload -def equalizeHist(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def equalizeHist(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def erode( - src: cv2.typing.MatLike, - kernel: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - iterations: int = ..., - borderType: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def erode( - src: UMat, - kernel: UMat, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - iterations: int = ..., - borderType: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> UMat: ... -@typing.overload -def estimateAffine2D( - from_: cv2.typing.MatLike, - to: cv2.typing.MatLike, - inliers: cv2.typing.MatLike | None = ..., - method: int = ..., - ransacReprojThreshold: float = ..., - maxIters: int = ..., - confidence: float = ..., - refineIters: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateAffine2D( - from_: UMat, - to: UMat, - inliers: UMat | None = ..., - method: int = ..., - ransacReprojThreshold: float = ..., - maxIters: int = ..., - confidence: float = ..., - refineIters: int = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... 
-@typing.overload -def estimateAffine2D( - pts1: cv2.typing.MatLike, - pts2: cv2.typing.MatLike, - params: UsacParams, - inliers: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateAffine2D( - pts1: UMat, - pts2: UMat, - params: UsacParams, - inliers: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def estimateAffine3D( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - out: cv2.typing.MatLike | None = ..., - inliers: cv2.typing.MatLike | None = ..., - ransacThreshold: float = ..., - confidence: float = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateAffine3D( - src: UMat, - dst: UMat, - out: UMat | None = ..., - inliers: UMat | None = ..., - ransacThreshold: float = ..., - confidence: float = ..., -) -> tuple[ - int, - UMat, - UMat, -]: ... -@typing.overload -def estimateAffine3D( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - force_rotation: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - float, -]: ... -@typing.overload -def estimateAffine3D(src: UMat, dst: UMat, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ... -@typing.overload -def estimateAffinePartial2D( - from_: cv2.typing.MatLike, - to: cv2.typing.MatLike, - inliers: cv2.typing.MatLike | None = ..., - method: int = ..., - ransacReprojThreshold: float = ..., - maxIters: int = ..., - confidence: float = ..., - refineIters: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateAffinePartial2D( - from_: UMat, - to: UMat, - inliers: UMat | None = ..., - method: int = ..., - ransacReprojThreshold: float = ..., - maxIters: int = ..., - confidence: float = ..., - refineIters: int = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def estimateChessboardSharpness( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - corners: cv2.typing.MatLike, - rise_distance: float = ..., - vertical: bool = ..., - sharpness: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.Scalar, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateChessboardSharpness( - image: UMat, - patternSize: cv2.typing.Size, - corners: UMat, - rise_distance: float = ..., - vertical: bool = ..., - sharpness: UMat | None = ..., -) -> tuple[ - cv2.typing.Scalar, - UMat, -]: ... -@typing.overload -def estimateTranslation3D( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - out: cv2.typing.MatLike | None = ..., - inliers: cv2.typing.MatLike | None = ..., - ransacThreshold: float = ..., - confidence: float = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def estimateTranslation3D( - src: UMat, - dst: UMat, - out: UMat | None = ..., - inliers: UMat | None = ..., - ransacThreshold: float = ..., - confidence: float = ..., -) -> tuple[ - int, - UMat, - UMat, -]: ... -@typing.overload -def exp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def exp(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def extractChannel(src: cv2.typing.MatLike, coi: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def extractChannel(src: UMat, coi: int, dst: UMat | None = ...) -> UMat: ... -def fastAtan2(y: float, x: float) -> float: ... 
-@typing.overload -def fastNlMeansDenoising( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - h: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoising( - src: UMat, - dst: UMat | None = ..., - h: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> UMat: ... -@typing.overload -def fastNlMeansDenoising( - src: cv2.typing.MatLike, - h: typing.Sequence[float], - dst: cv2.typing.MatLike | None = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., - normType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoising( - src: UMat, - h: typing.Sequence[float], - dst: UMat | None = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., - normType: int = ..., -) -> UMat: ... -@typing.overload -def fastNlMeansDenoisingColored( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - h: float = ..., - hColor: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoisingColored( - src: UMat, - dst: UMat | None = ..., - h: float = ..., - hColor: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> UMat: ... -@typing.overload -def fastNlMeansDenoisingColoredMulti( - srcImgs: typing.Sequence[cv2.typing.MatLike], - imgToDenoiseIndex: int, - temporalWindowSize: int, - dst: cv2.typing.MatLike | None = ..., - h: float = ..., - hColor: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoisingColoredMulti( - srcImgs: typing.Sequence[UMat], - imgToDenoiseIndex: int, - temporalWindowSize: int, - dst: UMat | None = ..., - h: float = ..., - hColor: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> UMat: ... -@typing.overload -def fastNlMeansDenoisingMulti( - srcImgs: typing.Sequence[cv2.typing.MatLike], - imgToDenoiseIndex: int, - temporalWindowSize: int, - dst: cv2.typing.MatLike | None = ..., - h: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoisingMulti( - srcImgs: typing.Sequence[UMat], - imgToDenoiseIndex: int, - temporalWindowSize: int, - dst: UMat | None = ..., - h: float = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., -) -> UMat: ... -@typing.overload -def fastNlMeansDenoisingMulti( - srcImgs: typing.Sequence[cv2.typing.MatLike], - imgToDenoiseIndex: int, - temporalWindowSize: int, - h: typing.Sequence[float], - dst: cv2.typing.MatLike | None = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., - normType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fastNlMeansDenoisingMulti( - srcImgs: typing.Sequence[UMat], - imgToDenoiseIndex: int, - temporalWindowSize: int, - h: typing.Sequence[float], - dst: UMat | None = ..., - templateWindowSize: int = ..., - searchWindowSize: int = ..., - normType: int = ..., -) -> UMat: ... -@typing.overload -def fillConvexPoly( - img: cv2.typing.MatLike, - points: cv2.typing.MatLike, - color: cv2.typing.Scalar, - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fillConvexPoly( - img: UMat, - points: UMat, - color: cv2.typing.Scalar, - lineType: int = ..., - shift: int = ..., -) -> UMat: ... 
-@typing.overload -def fillPoly( - img: cv2.typing.MatLike, - pts: typing.Sequence[cv2.typing.MatLike], - color: cv2.typing.Scalar, - lineType: int = ..., - shift: int = ..., - offset: cv2.typing.Point = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fillPoly( - img: UMat, - pts: typing.Sequence[UMat], - color: cv2.typing.Scalar, - lineType: int = ..., - shift: int = ..., - offset: cv2.typing.Point = ..., -) -> UMat: ... -@typing.overload -def filter2D( - src: cv2.typing.MatLike, - ddepth: int, - kernel: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - delta: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def filter2D( - src: UMat, - ddepth: int, - kernel: UMat, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - delta: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def filterHomographyDecompByVisibleRefpoints( - rotations: typing.Sequence[cv2.typing.MatLike], - normals: typing.Sequence[cv2.typing.MatLike], - beforePoints: cv2.typing.MatLike, - afterPoints: cv2.typing.MatLike, - possibleSolutions: cv2.typing.MatLike | None = ..., - pointsMask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def filterHomographyDecompByVisibleRefpoints( - rotations: typing.Sequence[UMat], - normals: typing.Sequence[UMat], - beforePoints: UMat, - afterPoints: UMat, - possibleSolutions: UMat | None = ..., - pointsMask: UMat | None = ..., -) -> UMat: ... -@typing.overload -def filterSpeckles( - img: cv2.typing.MatLike, - newVal: float, - maxSpeckleSize: int, - maxDiff: float, - buf: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def filterSpeckles( - img: UMat, - newVal: float, - maxSpeckleSize: int, - maxDiff: float, - buf: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def find4QuadCornerSubpix( - img: cv2.typing.MatLike, - corners: cv2.typing.MatLike, - region_size: cv2.typing.Size, -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def find4QuadCornerSubpix(img: UMat, corners: UMat, region_size: cv2.typing.Size) -> tuple[bool, UMat]: ... -@typing.overload -def findChessboardCorners( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - corners: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def findChessboardCorners( - image: UMat, - patternSize: cv2.typing.Size, - corners: UMat | None = ..., - flags: int = ..., -) -> tuple[ - bool, - UMat, -]: ... -@typing.overload -def findChessboardCornersSB( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - corners: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def findChessboardCornersSB( - image: UMat, - patternSize: cv2.typing.Size, - corners: UMat | None = ..., - flags: int = ..., -) -> tuple[ - bool, - UMat, -]: ... -@typing.overload -def findChessboardCornersSBWithMeta( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - flags: int, - corners: cv2.typing.MatLike | None = ..., - meta: cv2.typing.MatLike | None = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... 
-@typing.overload -def findChessboardCornersSBWithMeta( - image: UMat, - patternSize: cv2.typing.Size, - flags: int, - corners: UMat | None = ..., - meta: UMat | None = ..., -) -> tuple[ - bool, - UMat, - UMat, -]: ... -@typing.overload -def findCirclesGrid( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - flags: int, - blobDetector: cv2.typing.FeatureDetector, - parameters: CirclesGridFinderParameters, - centers: cv2.typing.MatLike | None = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def findCirclesGrid( - image: UMat, - patternSize: cv2.typing.Size, - flags: int, - blobDetector: cv2.typing.FeatureDetector, - parameters: CirclesGridFinderParameters, - centers: UMat | None = ..., -) -> tuple[ - bool, - UMat, -]: ... -@typing.overload -def findCirclesGrid( - image: cv2.typing.MatLike, - patternSize: cv2.typing.Size, - centers: cv2.typing.MatLike | None = ..., - flags: int = ..., - blobDetector: cv2.typing.FeatureDetector = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def findCirclesGrid( - image: UMat, - patternSize: cv2.typing.Size, - centers: UMat | None = ..., - flags: int = ..., - blobDetector: cv2.typing.FeatureDetector = ..., -) -> tuple[ - bool, - UMat, -]: ... -@typing.overload -def findContours( - image: cv2.typing.MatLike, - mode: int, - method: int, - contours: typing.Sequence[cv2.typing.MatLike] | None = ..., - hierarchy: cv2.typing.MatLike | None = ..., - offset: cv2.typing.Point = ..., -) -> tuple[ - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, -]: ... -@typing.overload -def findContours( - image: UMat, - mode: int, - method: int, - contours: typing.Sequence[UMat] | None = ..., - hierarchy: UMat | None = ..., - offset: cv2.typing.Point = ..., -) -> tuple[ - typing.Sequence[UMat], - UMat, -]: ... -@typing.overload -def findEssentialMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - method: int = ..., - prob: float = ..., - threshold: float = ..., - maxIters: int = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findEssentialMat( - points1: UMat, - points2: UMat, - cameraMatrix: UMat, - method: int = ..., - prob: float = ..., - threshold: float = ..., - maxIters: int = ..., - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findEssentialMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - focal: float = ..., - pp: cv2.typing.Point2d = ..., - method: int = ..., - prob: float = ..., - threshold: float = ..., - maxIters: int = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findEssentialMat( - points1: UMat, - points2: UMat, - focal: float = ..., - pp: cv2.typing.Point2d = ..., - method: int = ..., - prob: float = ..., - threshold: float = ..., - maxIters: int = ..., - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findEssentialMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - method: int = ..., - prob: float = ..., - threshold: float = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... 
-@typing.overload -def findEssentialMat( - points1: UMat, - points2: UMat, - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - method: int = ..., - prob: float = ..., - threshold: float = ..., - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findEssentialMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - dist_coeff1: cv2.typing.MatLike, - dist_coeff2: cv2.typing.MatLike, - params: UsacParams, - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findEssentialMat( - points1: UMat, - points2: UMat, - cameraMatrix1: UMat, - cameraMatrix2: UMat, - dist_coeff1: UMat, - dist_coeff2: UMat, - params: UsacParams, - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findFundamentalMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - method: int, - ransacReprojThreshold: float, - confidence: float, - maxIters: int, - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findFundamentalMat( - points1: UMat, - points2: UMat, - method: int, - ransacReprojThreshold: float, - confidence: float, - maxIters: int, - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findFundamentalMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - method: int = ..., - ransacReprojThreshold: float = ..., - confidence: float = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findFundamentalMat( - points1: UMat, - points2: UMat, - method: int = ..., - ransacReprojThreshold: float = ..., - confidence: float = ..., - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findFundamentalMat( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - params: UsacParams, - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findFundamentalMat( - points1: UMat, - points2: UMat, - params: UsacParams, - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findHomography( - srcPoints: cv2.typing.MatLike, - dstPoints: cv2.typing.MatLike, - method: int = ..., - ransacReprojThreshold: float = ..., - mask: cv2.typing.MatLike | None = ..., - maxIters: int = ..., - confidence: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findHomography( - srcPoints: UMat, - dstPoints: UMat, - method: int = ..., - ransacReprojThreshold: float = ..., - mask: UMat | None = ..., - maxIters: int = ..., - confidence: float = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findHomography( - srcPoints: cv2.typing.MatLike, - dstPoints: cv2.typing.MatLike, - params: UsacParams, - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def findHomography( - srcPoints: UMat, - dstPoints: UMat, - params: UsacParams, - mask: UMat | None = ..., -) -> tuple[ - cv2.typing.MatLike, - UMat, -]: ... -@typing.overload -def findNonZero(src: cv2.typing.MatLike, idx: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... 
-@typing.overload -def findNonZero(src: UMat, idx: UMat | None = ...) -> UMat: ... -@typing.overload -def findTransformECC( - templateImage: cv2.typing.MatLike, - inputImage: cv2.typing.MatLike, - warpMatrix: cv2.typing.MatLike, - motionType: int, - criteria: cv2.typing.TermCriteria, - inputMask: cv2.typing.MatLike, - gaussFiltSize: int, -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def findTransformECC( - templateImage: UMat, - inputImage: UMat, - warpMatrix: UMat, - motionType: int, - criteria: cv2.typing.TermCriteria, - inputMask: UMat, - gaussFiltSize: int, -) -> tuple[ - float, - UMat, -]: ... -@typing.overload -def findTransformECC( - templateImage: cv2.typing.MatLike, - inputImage: cv2.typing.MatLike, - warpMatrix: cv2.typing.MatLike, - motionType: int = ..., - criteria: cv2.typing.TermCriteria = ..., - inputMask: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def findTransformECC( - templateImage: UMat, - inputImage: UMat, - warpMatrix: UMat, - motionType: int = ..., - criteria: cv2.typing.TermCriteria = ..., - inputMask: UMat | None = ..., -) -> tuple[ - float, - UMat, -]: ... -@typing.overload -def fitEllipse(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitEllipse(points: UMat) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitEllipseAMS(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitEllipseAMS(points: UMat) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitEllipseDirect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitEllipseDirect(points: UMat) -> cv2.typing.RotatedRect: ... -@typing.overload -def fitLine( - points: cv2.typing.MatLike, - distType: int, - param: float, - reps: float, - aeps: float, - line: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def fitLine(points: UMat, distType: int, param: float, reps: float, aeps: float, line: UMat | None = ...) -> UMat: ... -@typing.overload -def flip(src: cv2.typing.MatLike, flipCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def flip(src: UMat, flipCode: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def flipND(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def flipND(src: UMat, axis: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def floodFill( - image: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - seedPoint: cv2.typing.Point, - newVal: cv2.typing.Scalar, - loDiff: cv2.typing.Scalar = ..., - upDiff: cv2.typing.Scalar = ..., - flags: int = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.Rect, -]: ... -@typing.overload -def floodFill( - image: UMat, - mask: UMat, - seedPoint: cv2.typing.Point, - newVal: cv2.typing.Scalar, - loDiff: cv2.typing.Scalar = ..., - upDiff: cv2.typing.Scalar = ..., - flags: int = ..., -) -> tuple[ - int, - UMat, - UMat, - cv2.typing.Rect, -]: ... -@typing.overload -def gemm( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - alpha: float, - src3: cv2.typing.MatLike, - beta: float, - dst: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def gemm( - src1: UMat, - src2: UMat, - alpha: float, - src3: UMat, - beta: float, - dst: UMat | None = ..., - flags: int = ..., -) -> UMat: ... 
-@typing.overload -def getAffineTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike) -> cv2.typing.MatLike: ... -@typing.overload -def getAffineTransform(src: UMat, dst: UMat) -> cv2.typing.MatLike: ... -def getBuildInformation() -> str: ... -def getCPUFeaturesLine() -> str: ... -def getCPUTickCount() -> int: ... -@typing.overload -def getDefaultNewCameraMatrix( - cameraMatrix: cv2.typing.MatLike, - imgsize: cv2.typing.Size = ..., - centerPrincipalPoint: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def getDefaultNewCameraMatrix( - cameraMatrix: UMat, - imgsize: cv2.typing.Size = ..., - centerPrincipalPoint: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def getDerivKernels( - dx: int, - dy: int, - ksize: int, - kx: cv2.typing.MatLike | None = ..., - ky: cv2.typing.MatLike | None = ..., - normalize: bool = ..., - ktype: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def getDerivKernels( - dx: int, - dy: int, - ksize: int, - kx: UMat | None = ..., - ky: UMat | None = ..., - normalize: bool = ..., - ktype: int = ..., -) -> tuple[ - UMat, - UMat, -]: ... -def getFontScaleFromHeight(fontFace: int, pixelHeight: int, thickness: int = ...) -> float: ... -def getGaborKernel( - ksize: cv2.typing.Size, - sigma: float, - theta: float, - lambd: float, - gamma: float, - psi: float = ..., - ktype: int = ..., -) -> cv2.typing.MatLike: ... -def getGaussianKernel(ksize: int, sigma: float, ktype: int = ...) -> cv2.typing.MatLike: ... -def getHardwareFeatureName(feature: int) -> str: ... -def getLogLevel() -> int: ... -def getNumThreads() -> int: ... -def getNumberOfCPUs() -> int: ... -def getOptimalDFTSize(vecsize: int) -> int: ... -@typing.overload -def getOptimalNewCameraMatrix( - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - alpha: float, - newImgSize: cv2.typing.Size = ..., - centerPrincipalPoint: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.Rect, -]: ... -@typing.overload -def getOptimalNewCameraMatrix( - cameraMatrix: UMat, - distCoeffs: UMat, - imageSize: cv2.typing.Size, - alpha: float, - newImgSize: cv2.typing.Size = ..., - centerPrincipalPoint: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.Rect, -]: ... -@typing.overload -def getPerspectiveTransform( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - solveMethod: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def getPerspectiveTransform(src: UMat, dst: UMat, solveMethod: int = ...) -> cv2.typing.MatLike: ... -@typing.overload -def getRectSubPix( - image: cv2.typing.MatLike, - patchSize: cv2.typing.Size, - center: cv2.typing.Point2f, - patch: cv2.typing.MatLike | None = ..., - patchType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def getRectSubPix( - image: UMat, - patchSize: cv2.typing.Size, - center: cv2.typing.Point2f, - patch: UMat | None = ..., - patchType: int = ..., -) -> UMat: ... -def getRotationMatrix2D(center: cv2.typing.Point2f, angle: float, scale: float) -> cv2.typing.MatLike: ... -def getStructuringElement(shape: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... -def getTextSize(text: str, fontFace: int, fontScale: float, thickness: int) -> tuple[cv2.typing.Size, int]: ... -def getThreadNum() -> int: ... -def getTickCount() -> int: ... -def getTickFrequency() -> float: ... -def getTrackbarPos(trackbarname: str, winname: str) -> int: ... 
-def getValidDisparityROI( - roi1: cv2.typing.Rect, - roi2: cv2.typing.Rect, - minDisparity: int, - numberOfDisparities: int, - blockSize: int, -) -> cv2.typing.Rect: ... -def getVersionMajor() -> int: ... -def getVersionMinor() -> int: ... -def getVersionRevision() -> int: ... -def getVersionString() -> str: ... -def getWindowImageRect(winname: str) -> cv2.typing.Rect: ... -def getWindowProperty(winname: str, prop_id: int) -> float: ... -@typing.overload -def goodFeaturesToTrack( - image: cv2.typing.MatLike, - maxCorners: int, - qualityLevel: float, - minDistance: float, - corners: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., - blockSize: int = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def goodFeaturesToTrack( - image: UMat, - maxCorners: int, - qualityLevel: float, - minDistance: float, - corners: UMat | None = ..., - mask: UMat | None = ..., - blockSize: int = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> UMat: ... -@typing.overload -def goodFeaturesToTrack( - image: cv2.typing.MatLike, - maxCorners: int, - qualityLevel: float, - minDistance: float, - mask: cv2.typing.MatLike, - blockSize: int, - gradientSize: int, - corners: cv2.typing.MatLike | None = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def goodFeaturesToTrack( - image: UMat, - maxCorners: int, - qualityLevel: float, - minDistance: float, - mask: UMat, - blockSize: int, - gradientSize: int, - corners: UMat | None = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> UMat: ... -@typing.overload -def goodFeaturesToTrackWithQuality( - image: cv2.typing.MatLike, - maxCorners: int, - qualityLevel: float, - minDistance: float, - mask: cv2.typing.MatLike, - corners: cv2.typing.MatLike | None = ..., - cornersQuality: cv2.typing.MatLike | None = ..., - blockSize: int = ..., - gradientSize: int = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def goodFeaturesToTrackWithQuality( - image: UMat, - maxCorners: int, - qualityLevel: float, - minDistance: float, - mask: UMat, - corners: UMat | None = ..., - cornersQuality: UMat | None = ..., - blockSize: int = ..., - gradientSize: int = ..., - useHarrisDetector: bool = ..., - k: float = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def grabCut( - img: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - rect: cv2.typing.Rect, - bgdModel: cv2.typing.MatLike, - fgdModel: cv2.typing.MatLike, - iterCount: int, - mode: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def grabCut( - img: UMat, - mask: UMat, - rect: cv2.typing.Rect, - bgdModel: UMat, - fgdModel: UMat, - iterCount: int, - mode: int = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... -def groupRectangles( - rectList: typing.Sequence[cv2.typing.Rect], - groupThreshold: int, - eps: float = ..., -) -> tuple[ - typing.Sequence[cv2.typing.Rect], - typing.Sequence[int], -]: ... -@typing.overload -def hasNonZero(src: cv2.typing.MatLike) -> bool: ... -@typing.overload -def hasNonZero(src: UMat) -> bool: ... -def haveImageReader(filename: str) -> bool: ... -def haveImageWriter(filename: str) -> bool: ... -def haveOpenVX() -> bool: ... -@typing.overload -def hconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... 
-@typing.overload -def hconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... -@typing.overload -def idct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... -@typing.overload -def idct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... -@typing.overload -def idft( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - flags: int = ..., - nonzeroRows: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def idft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ... -@typing.overload -def illuminationChange( - src: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - alpha: float = ..., - beta: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def illuminationChange( - src: UMat, - mask: UMat, - dst: UMat | None = ..., - alpha: float = ..., - beta: float = ..., -) -> UMat: ... -def imcount(filename: str, flags: int = ...) -> int: ... -@typing.overload -def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ... -@typing.overload -def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ... -@typing.overload -def imdecodemulti( - buf: cv2.typing.MatLike, - flags: int, - mats: typing.Sequence[cv2.typing.MatLike] | None = ..., -) -> tuple[ - bool, - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def imdecodemulti( - buf: UMat, - flags: int, - mats: typing.Sequence[cv2.typing.MatLike] | None = ..., -) -> tuple[ - bool, - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def imencode( - ext: str, - img: cv2.typing.MatLike, - params: typing.Sequence[int] = ..., -) -> tuple[ - bool, - numpy.ndarray[ - typing.Any, - numpy.dtype[numpy.uint8], - ], -]: ... -@typing.overload -def imencode( - ext: str, - img: UMat, - params: typing.Sequence[int] = ..., -) -> tuple[ - bool, - numpy.ndarray[ - typing.Any, - numpy.dtype[numpy.uint8], - ], -]: ... -def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ... -@typing.overload -def imreadmulti( - filename: str, - mats: typing.Sequence[cv2.typing.MatLike] | None = ..., - flags: int = ..., -) -> tuple[ - bool, - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def imreadmulti( - filename: str, - start: int, - count: int, - mats: typing.Sequence[cv2.typing.MatLike] | None = ..., - flags: int = ..., -) -> tuple[ - bool, - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def imshow(winname: str, mat: cv2.typing.MatLike) -> None: ... -@typing.overload -def imshow(winname: str, mat: cv2.cuda.GpuMat) -> None: ... -@typing.overload -def imshow(winname: str, mat: UMat) -> None: ... -@typing.overload -def imwrite(filename: str, img: cv2.typing.MatLike, params: typing.Sequence[int] = ...) -> bool: ... -@typing.overload -def imwrite(filename: str, img: UMat, params: typing.Sequence[int] = ...) -> bool: ... -@typing.overload -def imwritemulti( - filename: str, - img: typing.Sequence[cv2.typing.MatLike], - params: typing.Sequence[int] = ..., -) -> bool: ... -@typing.overload -def imwritemulti(filename: str, img: typing.Sequence[UMat], params: typing.Sequence[int] = ...) -> bool: ... -@typing.overload -def inRange( - src: cv2.typing.MatLike, - lowerb: cv2.typing.MatLike, - upperb: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def inRange(src: UMat, lowerb: UMat, upperb: UMat, dst: UMat | None = ...) -> UMat: ... 
-@typing.overload -def initCameraMatrix2D( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - aspectRatio: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def initCameraMatrix2D( - objectPoints: typing.Sequence[UMat], - imagePoints: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - aspectRatio: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def initInverseRectificationMap( - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - R: cv2.typing.MatLike, - newCameraMatrix: cv2.typing.MatLike, - size: cv2.typing.Size, - m1type: int, - map1: cv2.typing.MatLike | None = ..., - map2: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def initInverseRectificationMap( - cameraMatrix: UMat, - distCoeffs: UMat, - R: UMat, - newCameraMatrix: UMat, - size: cv2.typing.Size, - m1type: int, - map1: UMat | None = ..., - map2: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def initUndistortRectifyMap( - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - R: cv2.typing.MatLike, - newCameraMatrix: cv2.typing.MatLike, - size: cv2.typing.Size, - m1type: int, - map1: cv2.typing.MatLike | None = ..., - map2: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def initUndistortRectifyMap( - cameraMatrix: UMat, - distCoeffs: UMat, - R: UMat, - newCameraMatrix: UMat, - size: cv2.typing.Size, - m1type: int, - map1: UMat | None = ..., - map2: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def inpaint( - src: cv2.typing.MatLike, - inpaintMask: cv2.typing.MatLike, - inpaintRadius: float, - flags: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def inpaint(src: UMat, inpaintMask: UMat, inpaintRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def insertChannel(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, coi: int) -> cv2.typing.MatLike: ... -@typing.overload -def insertChannel(src: UMat, dst: UMat, coi: int) -> UMat: ... -@typing.overload -def integral( - src: cv2.typing.MatLike, - sum: cv2.typing.MatLike | None = ..., - sdepth: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def integral(src: UMat, sum: UMat | None = ..., sdepth: int = ...) -> UMat: ... -@typing.overload -def integral2( - src: cv2.typing.MatLike, - sum: cv2.typing.MatLike | None = ..., - sqsum: cv2.typing.MatLike | None = ..., - sdepth: int = ..., - sqdepth: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def integral2( - src: UMat, - sum: UMat | None = ..., - sqsum: UMat | None = ..., - sdepth: int = ..., - sqdepth: int = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def integral3( - src: cv2.typing.MatLike, - sum: cv2.typing.MatLike | None = ..., - sqsum: cv2.typing.MatLike | None = ..., - tilted: cv2.typing.MatLike | None = ..., - sdepth: int = ..., - sqdepth: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def integral3( - src: UMat, - sum: UMat | None = ..., - sqsum: UMat | None = ..., - tilted: UMat | None = ..., - sdepth: int = ..., - sqdepth: int = ..., -) -> tuple[ - UMat, - UMat, - UMat, -]: ... 
-@typing.overload -def intersectConvexConvex( - p1: cv2.typing.MatLike, - p2: cv2.typing.MatLike, - p12: cv2.typing.MatLike | None = ..., - handleNested: bool = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def intersectConvexConvex( - p1: UMat, - p2: UMat, - p12: UMat | None = ..., - handleNested: bool = ..., -) -> tuple[ - float, - UMat, -]: ... -@typing.overload -def invert( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def invert(src: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[float, UMat]: ... -@typing.overload -def invertAffineTransform(M: cv2.typing.MatLike, iM: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def invertAffineTransform(M: UMat, iM: UMat | None = ...) -> UMat: ... -@typing.overload -def isContourConvex(contour: cv2.typing.MatLike) -> bool: ... -@typing.overload -def isContourConvex(contour: UMat) -> bool: ... -@typing.overload -def kmeans( - data: cv2.typing.MatLike, - K: int, - bestLabels: cv2.typing.MatLike, - criteria: cv2.typing.TermCriteria, - attempts: int, - flags: int, - centers: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def kmeans( - data: UMat, - K: int, - bestLabels: UMat, - criteria: cv2.typing.TermCriteria, - attempts: int, - flags: int, - centers: UMat | None = ..., -) -> tuple[ - float, - UMat, - UMat, -]: ... -@typing.overload -def line( - img: cv2.typing.MatLike, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def line( - img: UMat, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -@typing.overload -def linearPolar( - src: cv2.typing.MatLike, - center: cv2.typing.Point2f, - maxRadius: float, - flags: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def linearPolar( - src: UMat, - center: cv2.typing.Point2f, - maxRadius: float, - flags: int, - dst: UMat | None = ..., -) -> UMat: ... -@typing.overload -def log(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def log(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def logPolar( - src: cv2.typing.MatLike, - center: cv2.typing.Point2f, - M: float, - flags: int, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def logPolar(src: UMat, center: cv2.typing.Point2f, M: float, flags: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def magnitude( - x: cv2.typing.MatLike, - y: cv2.typing.MatLike, - magnitude: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def magnitude(x: UMat, y: UMat, magnitude: UMat | None = ...) -> UMat: ... -@typing.overload -def matMulDeriv( - A: cv2.typing.MatLike, - B: cv2.typing.MatLike, - dABdA: cv2.typing.MatLike | None = ..., - dABdB: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def matMulDeriv(A: UMat, B: UMat, dABdA: UMat | None = ..., dABdB: UMat | None = ...) -> tuple[UMat, UMat]: ... 
-@typing.overload -def matchShapes(contour1: cv2.typing.MatLike, contour2: cv2.typing.MatLike, method: int, parameter: float) -> float: ... -@typing.overload -def matchShapes(contour1: UMat, contour2: UMat, method: int, parameter: float) -> float: ... -@typing.overload -def matchTemplate( - image: cv2.typing.MatLike, - templ: cv2.typing.MatLike, - method: int, - result: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def matchTemplate( - image: UMat, - templ: UMat, - method: int, - result: UMat | None = ..., - mask: UMat | None = ..., -) -> UMat: ... -@typing.overload -def max( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def max(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def mean(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.Scalar: ... -@typing.overload -def mean(src: UMat, mask: UMat | None = ...) -> cv2.typing.Scalar: ... -@typing.overload -def meanShift( - probImage: cv2.typing.MatLike, - window: cv2.typing.Rect, - criteria: cv2.typing.TermCriteria, -) -> tuple[ - int, - cv2.typing.Rect, -]: ... -@typing.overload -def meanShift( - probImage: UMat, - window: cv2.typing.Rect, - criteria: cv2.typing.TermCriteria, -) -> tuple[ - int, - cv2.typing.Rect, -]: ... -@typing.overload -def meanStdDev( - src: cv2.typing.MatLike, - mean: cv2.typing.MatLike | None = ..., - stddev: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def meanStdDev( - src: UMat, - mean: UMat | None = ..., - stddev: UMat | None = ..., - mask: UMat | None = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def medianBlur(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def medianBlur(src: UMat, ksize: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def merge(mv: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def merge(mv: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... -@typing.overload -def min( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def min(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def minAreaRect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... -@typing.overload -def minAreaRect(points: UMat) -> cv2.typing.RotatedRect: ... -@typing.overload -def minEnclosingCircle(points: cv2.typing.MatLike) -> tuple[cv2.typing.Point2f, float]: ... -@typing.overload -def minEnclosingCircle(points: UMat) -> tuple[cv2.typing.Point2f, float]: ... -@typing.overload -def minEnclosingTriangle( - points: cv2.typing.MatLike, - triangle: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def minEnclosingTriangle(points: UMat, triangle: UMat | None = ...) -> tuple[float, UMat]: ... -@typing.overload -def minMaxLoc( - src: cv2.typing.MatLike, - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - float, - cv2.typing.Point, - cv2.typing.Point, -]: ... -@typing.overload -def minMaxLoc(src: UMat, mask: UMat | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ... 
-@typing.overload -def mixChannels( - src: typing.Sequence[cv2.typing.MatLike], - dst: typing.Sequence[cv2.typing.MatLike], - fromTo: typing.Sequence[int], -) -> typing.Sequence[cv2.typing.MatLike]: ... -@typing.overload -def mixChannels( - src: typing.Sequence[UMat], - dst: typing.Sequence[UMat], - fromTo: typing.Sequence[int], -) -> typing.Sequence[UMat]: ... -@typing.overload -def moments(array: cv2.typing.MatLike, binaryImage: bool = ...) -> cv2.typing.Moments: ... -@typing.overload -def moments(array: UMat, binaryImage: bool = ...) -> cv2.typing.Moments: ... -@typing.overload -def morphologyEx( - src: cv2.typing.MatLike, - op: int, - kernel: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - iterations: int = ..., - borderType: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def morphologyEx( - src: UMat, - op: int, - kernel: UMat, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - iterations: int = ..., - borderType: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> UMat: ... -def moveWindow(winname: str, x: int, y: int) -> None: ... -@typing.overload -def mulSpectrums( - a: cv2.typing.MatLike, - b: cv2.typing.MatLike, - flags: int, - c: cv2.typing.MatLike | None = ..., - conjB: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def mulSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ... -@typing.overload -def mulTransposed( - src: cv2.typing.MatLike, - aTa: bool, - dst: cv2.typing.MatLike | None = ..., - delta: cv2.typing.MatLike | None = ..., - scale: float = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def mulTransposed( - src: UMat, - aTa: bool, - dst: UMat | None = ..., - delta: UMat | None = ..., - scale: float = ..., - dtype: int = ..., -) -> UMat: ... -@typing.overload -def multiply( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - scale: float = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def multiply(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... -def namedWindow(winname: str, flags: int = ...) -> None: ... -@typing.overload -def norm(src1: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ... -@typing.overload -def norm(src1: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ... -@typing.overload -def norm( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - normType: int = ..., - mask: cv2.typing.MatLike | None = ..., -) -> float: ... -@typing.overload -def norm(src1: UMat, src2: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ... -@typing.overload -def normalize( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - alpha: float = ..., - beta: float = ..., - norm_type: int = ..., - dtype: int = ..., - mask: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def normalize( - src: UMat, - dst: UMat, - alpha: float = ..., - beta: float = ..., - norm_type: int = ..., - dtype: int = ..., - mask: UMat | None = ..., -) -> UMat: ... -@typing.overload -def patchNaNs(a: cv2.typing.MatLike, val: float = ...) -> cv2.typing.MatLike: ... -@typing.overload -def patchNaNs(a: UMat, val: float = ...) -> UMat: ... 
-@typing.overload -def pencilSketch( - src: cv2.typing.MatLike, - dst1: cv2.typing.MatLike | None = ..., - dst2: cv2.typing.MatLike | None = ..., - sigma_s: float = ..., - sigma_r: float = ..., - shade_factor: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def pencilSketch( - src: UMat, - dst1: UMat | None = ..., - dst2: UMat | None = ..., - sigma_s: float = ..., - sigma_r: float = ..., - shade_factor: float = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def perspectiveTransform( - src: cv2.typing.MatLike, - m: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def perspectiveTransform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def phase( - x: cv2.typing.MatLike, - y: cv2.typing.MatLike, - angle: cv2.typing.MatLike | None = ..., - angleInDegrees: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def phase(x: UMat, y: UMat, angle: UMat | None = ..., angleInDegrees: bool = ...) -> UMat: ... -@typing.overload -def phaseCorrelate( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - window: cv2.typing.MatLike | None = ..., -) -> tuple[ - cv2.typing.Point2d, - float, -]: ... -@typing.overload -def phaseCorrelate(src1: UMat, src2: UMat, window: UMat | None = ...) -> tuple[cv2.typing.Point2d, float]: ... -@typing.overload -def pointPolygonTest(contour: cv2.typing.MatLike, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... -@typing.overload -def pointPolygonTest(contour: UMat, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... -@typing.overload -def polarToCart( - magnitude: cv2.typing.MatLike, - angle: cv2.typing.MatLike, - x: cv2.typing.MatLike | None = ..., - y: cv2.typing.MatLike | None = ..., - angleInDegrees: bool = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def polarToCart( - magnitude: UMat, - angle: UMat, - x: UMat | None = ..., - y: UMat | None = ..., - angleInDegrees: bool = ..., -) -> tuple[ - UMat, - UMat, -]: ... -def pollKey() -> int: ... -@typing.overload -def polylines( - img: cv2.typing.MatLike, - pts: typing.Sequence[cv2.typing.MatLike], - isClosed: bool, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def polylines( - img: UMat, - pts: typing.Sequence[UMat], - isClosed: bool, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -@typing.overload -def pow(src: cv2.typing.MatLike, power: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def pow(src: UMat, power: float, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def preCornerDetect( - src: cv2.typing.MatLike, - ksize: int, - dst: cv2.typing.MatLike | None = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def preCornerDetect(src: UMat, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... -@typing.overload -def projectPoints( - objectPoints: cv2.typing.MatLike, - rvec: cv2.typing.MatLike, - tvec: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike | None = ..., - jacobian: cv2.typing.MatLike | None = ..., - aspectRatio: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... 
-@typing.overload -def projectPoints( - objectPoints: UMat, - rvec: UMat, - tvec: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - imagePoints: UMat | None = ..., - jacobian: UMat | None = ..., - aspectRatio: float = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def putText( - img: cv2.typing.MatLike, - text: str, - org: cv2.typing.Point, - fontFace: int, - fontScale: float, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - bottomLeftOrigin: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def putText( - img: UMat, - text: str, - org: cv2.typing.Point, - fontFace: int, - fontScale: float, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - bottomLeftOrigin: bool = ..., -) -> UMat: ... -@typing.overload -def pyrDown( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - dstsize: cv2.typing.Size = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def pyrDown(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... -@typing.overload -def pyrMeanShiftFiltering( - src: cv2.typing.MatLike, - sp: float, - sr: float, - dst: cv2.typing.MatLike | None = ..., - maxLevel: int = ..., - termcrit: cv2.typing.TermCriteria = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def pyrMeanShiftFiltering( - src: UMat, - sp: float, - sr: float, - dst: UMat | None = ..., - maxLevel: int = ..., - termcrit: cv2.typing.TermCriteria = ..., -) -> UMat: ... -@typing.overload -def pyrUp( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - dstsize: cv2.typing.Size = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def pyrUp(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... -@typing.overload -def randShuffle(dst: cv2.typing.MatLike, iterFactor: float = ...) -> cv2.typing.MatLike: ... -@typing.overload -def randShuffle(dst: UMat, iterFactor: float = ...) -> UMat: ... -@typing.overload -def randn(dst: cv2.typing.MatLike, mean: cv2.typing.MatLike, stddev: cv2.typing.MatLike) -> cv2.typing.MatLike: ... -@typing.overload -def randn(dst: UMat, mean: UMat, stddev: UMat) -> UMat: ... -@typing.overload -def randu(dst: cv2.typing.MatLike, low: cv2.typing.MatLike, high: cv2.typing.MatLike) -> cv2.typing.MatLike: ... -@typing.overload -def randu(dst: UMat, low: UMat, high: UMat) -> UMat: ... -def readOpticalFlow(path: str) -> cv2.typing.MatLike: ... -@typing.overload -def recoverPose( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - E: cv2.typing.MatLike | None = ..., - R: cv2.typing.MatLike | None = ..., - t: cv2.typing.MatLike | None = ..., - method: int = ..., - prob: float = ..., - threshold: float = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def recoverPose( - points1: UMat, - points2: UMat, - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - E: UMat | None = ..., - R: UMat | None = ..., - t: UMat | None = ..., - method: int = ..., - prob: float = ..., - threshold: float = ..., - mask: UMat | None = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, - UMat, -]: ... 
-@typing.overload -def recoverPose( - E: cv2.typing.MatLike, - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - R: cv2.typing.MatLike | None = ..., - t: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def recoverPose( - E: UMat, - points1: UMat, - points2: UMat, - cameraMatrix: UMat, - R: UMat | None = ..., - t: UMat | None = ..., - mask: UMat | None = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def recoverPose( - E: cv2.typing.MatLike, - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - R: cv2.typing.MatLike | None = ..., - t: cv2.typing.MatLike | None = ..., - focal: float = ..., - pp: cv2.typing.Point2d = ..., - mask: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def recoverPose( - E: UMat, - points1: UMat, - points2: UMat, - R: UMat | None = ..., - t: UMat | None = ..., - focal: float = ..., - pp: cv2.typing.Point2d = ..., - mask: UMat | None = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def recoverPose( - E: cv2.typing.MatLike, - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distanceThresh: float, - R: cv2.typing.MatLike | None = ..., - t: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., - triangulatedPoints: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def recoverPose( - E: UMat, - points1: UMat, - points2: UMat, - cameraMatrix: UMat, - distanceThresh: float, - R: UMat | None = ..., - t: UMat | None = ..., - mask: UMat | None = ..., - triangulatedPoints: UMat | None = ..., -) -> tuple[ - int, - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def rectangle( - img: cv2.typing.MatLike, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def rectangle( - img: UMat, - pt1: cv2.typing.Point, - pt2: cv2.typing.Point, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -@typing.overload -def rectangle( - img: cv2.typing.MatLike, - rec: cv2.typing.Rect, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def rectangle( - img: UMat, - rec: cv2.typing.Rect, - color: cv2.typing.Scalar, - thickness: int = ..., - lineType: int = ..., - shift: int = ..., -) -> UMat: ... -def rectangleIntersectionArea(a: cv2.typing.Rect2d, b: cv2.typing.Rect2d) -> float: ... 
-@typing.overload -def rectify3Collinear( - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - cameraMatrix3: cv2.typing.MatLike, - distCoeffs3: cv2.typing.MatLike, - imgpt1: typing.Sequence[cv2.typing.MatLike], - imgpt3: typing.Sequence[cv2.typing.MatLike], - imageSize: cv2.typing.Size, - R12: cv2.typing.MatLike, - T12: cv2.typing.MatLike, - R13: cv2.typing.MatLike, - T13: cv2.typing.MatLike, - alpha: float, - newImgSize: cv2.typing.Size, - flags: int, - R1: cv2.typing.MatLike | None = ..., - R2: cv2.typing.MatLike | None = ..., - R3: cv2.typing.MatLike | None = ..., - P1: cv2.typing.MatLike | None = ..., - P2: cv2.typing.MatLike | None = ..., - P3: cv2.typing.MatLike | None = ..., - Q: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.Rect, - cv2.typing.Rect, -]: ... -@typing.overload -def rectify3Collinear( - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - cameraMatrix3: UMat, - distCoeffs3: UMat, - imgpt1: typing.Sequence[UMat], - imgpt3: typing.Sequence[UMat], - imageSize: cv2.typing.Size, - R12: UMat, - T12: UMat, - R13: UMat, - T13: UMat, - alpha: float, - newImgSize: cv2.typing.Size, - flags: int, - R1: UMat | None = ..., - R2: UMat | None = ..., - R3: UMat | None = ..., - P1: UMat | None = ..., - P2: UMat | None = ..., - P3: UMat | None = ..., - Q: UMat | None = ..., -) -> tuple[ - float, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - cv2.typing.Rect, - cv2.typing.Rect, -]: ... -@typing.overload -def reduce( - src: cv2.typing.MatLike, - dim: int, - rtype: int, - dst: cv2.typing.MatLike | None = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def reduce(src: UMat, dim: int, rtype: int, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... -@typing.overload -def reduceArgMax( - src: cv2.typing.MatLike, - axis: int, - dst: cv2.typing.MatLike | None = ..., - lastIndex: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def reduceArgMax(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... -@typing.overload -def reduceArgMin( - src: cv2.typing.MatLike, - axis: int, - dst: cv2.typing.MatLike | None = ..., - lastIndex: bool = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def reduceArgMin(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... -@typing.overload -def remap( - src: cv2.typing.MatLike, - map1: cv2.typing.MatLike, - map2: cv2.typing.MatLike, - interpolation: int, - dst: cv2.typing.MatLike | None = ..., - borderMode: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def remap( - src: UMat, - map1: UMat, - map2: UMat, - interpolation: int, - dst: UMat | None = ..., - borderMode: int = ..., - borderValue: cv2.typing.Scalar = ..., -) -> UMat: ... -@typing.overload -def repeat(src: cv2.typing.MatLike, ny: int, nx: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def repeat(src: UMat, ny: int, nx: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def reprojectImageTo3D( - disparity: cv2.typing.MatLike, - Q: cv2.typing.MatLike, - _3dImage: cv2.typing.MatLike | None = ..., - handleMissingValues: bool = ..., - ddepth: int = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def reprojectImageTo3D( - disparity: UMat, - Q: UMat, - _3dImage: UMat | None = ..., - handleMissingValues: bool = ..., - ddepth: int = ..., -) -> UMat: ... -@typing.overload -def resize( - src: cv2.typing.MatLike, - dsize: cv2.typing.Size | None, - dst: cv2.typing.MatLike | None = ..., - fx: float = ..., - fy: float = ..., - interpolation: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def resize( - src: UMat, - dsize: cv2.typing.Size | None, - dst: UMat | None = ..., - fx: float = ..., - fy: float = ..., - interpolation: int = ..., -) -> UMat: ... -@typing.overload -def resizeWindow(winname: str, width: int, height: int) -> None: ... -@typing.overload -def resizeWindow(winname: str, size: cv2.typing.Size) -> None: ... -@typing.overload -def rotate(src: cv2.typing.MatLike, rotateCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def rotate(src: UMat, rotateCode: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def rotatedRectangleIntersection( - rect1: cv2.typing.RotatedRect, - rect2: cv2.typing.RotatedRect, - intersectingRegion: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... -@typing.overload -def rotatedRectangleIntersection( - rect1: cv2.typing.RotatedRect, - rect2: cv2.typing.RotatedRect, - intersectingRegion: UMat | None = ..., -) -> tuple[ - int, - UMat, -]: ... -@typing.overload -def sampsonDistance(pt1: cv2.typing.MatLike, pt2: cv2.typing.MatLike, F: cv2.typing.MatLike) -> float: ... -@typing.overload -def sampsonDistance(pt1: UMat, pt2: UMat, F: UMat) -> float: ... -@typing.overload -def scaleAdd( - src1: cv2.typing.MatLike, - alpha: float, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def scaleAdd(src1: UMat, alpha: float, src2: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def seamlessClone( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - p: cv2.typing.Point, - flags: int, - blend: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def seamlessClone( - src: UMat, - dst: UMat, - mask: UMat, - p: cv2.typing.Point, - flags: int, - blend: UMat | None = ..., -) -> UMat: ... -@typing.overload -def selectROI( - windowName: str, - img: cv2.typing.MatLike, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> cv2.typing.Rect: ... -@typing.overload -def selectROI( - windowName: str, - img: UMat, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> cv2.typing.Rect: ... -@typing.overload -def selectROI( - img: cv2.typing.MatLike, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> cv2.typing.Rect: ... -@typing.overload -def selectROI( - img: UMat, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> cv2.typing.Rect: ... -@typing.overload -def selectROIs( - windowName: str, - img: cv2.typing.MatLike, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> typing.Sequence[cv2.typing.Rect]: ... -@typing.overload -def selectROIs( - windowName: str, - img: UMat, - showCrosshair: bool = ..., - fromCenter: bool = ..., - printNotice: bool = ..., -) -> typing.Sequence[cv2.typing.Rect]: ... 
-@typing.overload -def sepFilter2D( - src: cv2.typing.MatLike, - ddepth: int, - kernelX: cv2.typing.MatLike, - kernelY: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - delta: float = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def sepFilter2D( - src: UMat, - ddepth: int, - kernelX: UMat, - kernelY: UMat, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - delta: float = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def setIdentity(mtx: cv2.typing.MatLike, s: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... -@typing.overload -def setIdentity(mtx: UMat, s: cv2.typing.Scalar = ...) -> UMat: ... -def setLogLevel(level: int) -> int: ... -def setNumThreads(nthreads: int) -> None: ... -def setRNGSeed(seed: int) -> None: ... -def setTrackbarMax(trackbarname: str, winname: str, maxval: int) -> None: ... -def setTrackbarMin(trackbarname: str, winname: str, minval: int) -> None: ... -def setTrackbarPos(trackbarname: str, winname: str, pos: int) -> None: ... -def setUseOpenVX(flag: bool) -> None: ... -def setUseOptimized(onoff: bool) -> None: ... -def setWindowProperty(winname: str, prop_id: int, prop_value: float) -> None: ... -def setWindowTitle(winname: str, title: str) -> None: ... -@typing.overload -def solve( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, -]: ... -@typing.overload -def solve(src1: UMat, src2: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ... -@typing.overload -def solveCubic( - coeffs: cv2.typing.MatLike, - roots: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... -@typing.overload -def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ... -@typing.overload -def solveLP( - Func: cv2.typing.MatLike, - Constr: cv2.typing.MatLike, - constr_eps: float, - z: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... -@typing.overload -def solveLP(Func: UMat, Constr: UMat, constr_eps: float, z: UMat | None = ...) -> tuple[int, UMat]: ... -@typing.overload -def solveLP( - Func: cv2.typing.MatLike, - Constr: cv2.typing.MatLike, - z: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - cv2.typing.MatLike, -]: ... -@typing.overload -def solveLP(Func: UMat, Constr: UMat, z: UMat | None = ...) -> tuple[int, UMat]: ... -@typing.overload -def solveP3P( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - flags: int, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., -) -> tuple[ - int, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], -]: ... -@typing.overload -def solveP3P( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - flags: int, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., -) -> tuple[ - int, - typing.Sequence[UMat], - typing.Sequence[UMat], -]: ... 
-@typing.overload -def solvePnP( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvec: cv2.typing.MatLike | None = ..., - tvec: cv2.typing.MatLike | None = ..., - useExtrinsicGuess: bool = ..., - flags: int = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnP( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvec: UMat | None = ..., - tvec: UMat | None = ..., - useExtrinsicGuess: bool = ..., - flags: int = ..., -) -> tuple[ - bool, - UMat, - UMat, -]: ... -@typing.overload -def solvePnPGeneric( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - useExtrinsicGuess: bool = ..., - flags: SolvePnPMethod = ..., - rvec: cv2.typing.MatLike | None = ..., - tvec: cv2.typing.MatLike | None = ..., - reprojectionError: cv2.typing.MatLike | None = ..., -) -> tuple[ - int, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnPGeneric( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - useExtrinsicGuess: bool = ..., - flags: SolvePnPMethod = ..., - rvec: UMat | None = ..., - tvec: UMat | None = ..., - reprojectionError: UMat | None = ..., -) -> tuple[ - int, - typing.Sequence[UMat], - typing.Sequence[UMat], - UMat, -]: ... -@typing.overload -def solvePnPRansac( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvec: cv2.typing.MatLike | None = ..., - tvec: cv2.typing.MatLike | None = ..., - useExtrinsicGuess: bool = ..., - iterationsCount: int = ..., - reprojectionError: float = ..., - confidence: float = ..., - inliers: cv2.typing.MatLike | None = ..., - flags: int = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnPRansac( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvec: UMat | None = ..., - tvec: UMat | None = ..., - useExtrinsicGuess: bool = ..., - iterationsCount: int = ..., - reprojectionError: float = ..., - confidence: float = ..., - inliers: UMat | None = ..., - flags: int = ..., -) -> tuple[ - bool, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def solvePnPRansac( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvec: cv2.typing.MatLike | None = ..., - tvec: cv2.typing.MatLike | None = ..., - inliers: cv2.typing.MatLike | None = ..., - params: UsacParams = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnPRansac( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvec: UMat | None = ..., - tvec: UMat | None = ..., - inliers: UMat | None = ..., - params: UsacParams = ..., -) -> tuple[ - bool, - UMat, - UMat, - UMat, - UMat, -]: ... 
-@typing.overload -def solvePnPRefineLM( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvec: cv2.typing.MatLike, - tvec: cv2.typing.MatLike, - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnPRefineLM( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvec: UMat, - tvec: UMat, - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def solvePnPRefineVVS( - objectPoints: cv2.typing.MatLike, - imagePoints: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - rvec: cv2.typing.MatLike, - tvec: cv2.typing.MatLike, - criteria: cv2.typing.TermCriteria = ..., - VVSlambda: float = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePnPRefineVVS( - objectPoints: UMat, - imagePoints: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - rvec: UMat, - tvec: UMat, - criteria: cv2.typing.TermCriteria = ..., - VVSlambda: float = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def solvePoly( - coeffs: cv2.typing.MatLike, - roots: cv2.typing.MatLike | None = ..., - maxIters: int = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def solvePoly(coeffs: UMat, roots: UMat | None = ..., maxIters: int = ...) -> tuple[float, UMat]: ... -@typing.overload -def sort(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def sort(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def sortIdx(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def sortIdx(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def spatialGradient( - src: cv2.typing.MatLike, - dx: cv2.typing.MatLike | None = ..., - dy: cv2.typing.MatLike | None = ..., - ksize: int = ..., - borderType: int = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def spatialGradient( - src: UMat, - dx: UMat | None = ..., - dy: UMat | None = ..., - ksize: int = ..., - borderType: int = ..., -) -> tuple[ - UMat, - UMat, -]: ... -@typing.overload -def split( - m: cv2.typing.MatLike, - mv: typing.Sequence[cv2.typing.MatLike] | None = ..., -) -> typing.Sequence[cv2.typing.MatLike]: ... -@typing.overload -def split(m: UMat, mv: typing.Sequence[UMat] | None = ...) -> typing.Sequence[UMat]: ... -@typing.overload -def sqrBoxFilter( - src: cv2.typing.MatLike, - ddepth: int, - ksize: cv2.typing.Size, - dst: cv2.typing.MatLike | None = ..., - anchor: cv2.typing.Point = ..., - normalize: bool = ..., - borderType: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def sqrBoxFilter( - src: UMat, - ddepth: int, - ksize: cv2.typing.Size, - dst: UMat | None = ..., - anchor: cv2.typing.Point = ..., - normalize: bool = ..., - borderType: int = ..., -) -> UMat: ... -@typing.overload -def sqrt(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def sqrt(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def stackBlur( - src: cv2.typing.MatLike, - ksize: cv2.typing.Size, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... 
-@typing.overload -def stackBlur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ... -def startWindowThread() -> int: ... -@typing.overload -def stereoCalibrate( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints1: typing.Sequence[cv2.typing.MatLike], - imagePoints2: typing.Sequence[cv2.typing.MatLike], - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - R: cv2.typing.MatLike | None = ..., - T: cv2.typing.MatLike | None = ..., - E: cv2.typing.MatLike | None = ..., - F: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def stereoCalibrate( - objectPoints: typing.Sequence[UMat], - imagePoints1: typing.Sequence[UMat], - imagePoints2: typing.Sequence[UMat], - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - imageSize: cv2.typing.Size, - R: UMat | None = ..., - T: UMat | None = ..., - E: UMat | None = ..., - F: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, -]: ... -@typing.overload -def stereoCalibrate( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints1: typing.Sequence[cv2.typing.MatLike], - imagePoints2: typing.Sequence[cv2.typing.MatLike], - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - R: cv2.typing.MatLike, - T: cv2.typing.MatLike, - E: cv2.typing.MatLike | None = ..., - F: cv2.typing.MatLike | None = ..., - perViewErrors: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def stereoCalibrate( - objectPoints: typing.Sequence[UMat], - imagePoints1: typing.Sequence[UMat], - imagePoints2: typing.Sequence[UMat], - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - imageSize: cv2.typing.Size, - R: UMat, - T: UMat, - E: UMat | None = ..., - F: UMat | None = ..., - perViewErrors: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, -]: ... 
-@typing.overload -def stereoCalibrateExtended( - objectPoints: typing.Sequence[cv2.typing.MatLike], - imagePoints1: typing.Sequence[cv2.typing.MatLike], - imagePoints2: typing.Sequence[cv2.typing.MatLike], - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - R: cv2.typing.MatLike, - T: cv2.typing.MatLike, - E: cv2.typing.MatLike | None = ..., - F: cv2.typing.MatLike | None = ..., - rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., - perViewErrors: cv2.typing.MatLike | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - typing.Sequence[cv2.typing.MatLike], - typing.Sequence[cv2.typing.MatLike], - cv2.typing.MatLike, -]: ... -@typing.overload -def stereoCalibrateExtended( - objectPoints: typing.Sequence[UMat], - imagePoints1: typing.Sequence[UMat], - imagePoints2: typing.Sequence[UMat], - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - imageSize: cv2.typing.Size, - R: UMat, - T: UMat, - E: UMat | None = ..., - F: UMat | None = ..., - rvecs: typing.Sequence[UMat] | None = ..., - tvecs: typing.Sequence[UMat] | None = ..., - perViewErrors: UMat | None = ..., - flags: int = ..., - criteria: cv2.typing.TermCriteria = ..., -) -> tuple[ - float, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - UMat, - typing.Sequence[UMat], - typing.Sequence[UMat], - UMat, -]: ... -@typing.overload -def stereoRectify( - cameraMatrix1: cv2.typing.MatLike, - distCoeffs1: cv2.typing.MatLike, - cameraMatrix2: cv2.typing.MatLike, - distCoeffs2: cv2.typing.MatLike, - imageSize: cv2.typing.Size, - R: cv2.typing.MatLike, - T: cv2.typing.MatLike, - R1: cv2.typing.MatLike | None = ..., - R2: cv2.typing.MatLike | None = ..., - P1: cv2.typing.MatLike | None = ..., - P2: cv2.typing.MatLike | None = ..., - Q: cv2.typing.MatLike | None = ..., - flags: int = ..., - alpha: float = ..., - newImageSize: cv2.typing.Size = ..., -) -> tuple[ - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.MatLike, - cv2.typing.Rect, - cv2.typing.Rect, -]: ... -@typing.overload -def stereoRectify( - cameraMatrix1: UMat, - distCoeffs1: UMat, - cameraMatrix2: UMat, - distCoeffs2: UMat, - imageSize: cv2.typing.Size, - R: UMat, - T: UMat, - R1: UMat | None = ..., - R2: UMat | None = ..., - P1: UMat | None = ..., - P2: UMat | None = ..., - Q: UMat | None = ..., - flags: int = ..., - alpha: float = ..., - newImageSize: cv2.typing.Size = ..., -) -> tuple[ - UMat, - UMat, - UMat, - UMat, - UMat, - cv2.typing.Rect, - cv2.typing.Rect, -]: ... -@typing.overload -def stereoRectifyUncalibrated( - points1: cv2.typing.MatLike, - points2: cv2.typing.MatLike, - F: cv2.typing.MatLike, - imgSize: cv2.typing.Size, - H1: cv2.typing.MatLike | None = ..., - H2: cv2.typing.MatLike | None = ..., - threshold: float = ..., -) -> tuple[ - bool, - cv2.typing.MatLike, - cv2.typing.MatLike, -]: ... -@typing.overload -def stereoRectifyUncalibrated( - points1: UMat, - points2: UMat, - F: UMat, - imgSize: cv2.typing.Size, - H1: UMat | None = ..., - H2: UMat | None = ..., - threshold: float = ..., -) -> tuple[ - bool, - UMat, - UMat, -]: ... 
-@typing.overload -def stylization( - src: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - sigma_s: float = ..., - sigma_r: float = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def stylization(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... -@typing.overload -def subtract( - src1: cv2.typing.MatLike, - src2: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - mask: cv2.typing.MatLike | None = ..., - dtype: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def subtract(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... -@typing.overload -def sumElems(src: cv2.typing.MatLike) -> cv2.typing.Scalar: ... -@typing.overload -def sumElems(src: UMat) -> cv2.typing.Scalar: ... -@typing.overload -def textureFlattening( - src: cv2.typing.MatLike, - mask: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - low_threshold: float = ..., - high_threshold: float = ..., - kernel_size: int = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def textureFlattening( - src: UMat, - mask: UMat, - dst: UMat | None = ..., - low_threshold: float = ..., - high_threshold: float = ..., - kernel_size: int = ..., -) -> UMat: ... -@typing.overload -def threshold( - src: cv2.typing.MatLike, - thresh: float, - maxval: float, - type: int, - dst: cv2.typing.MatLike | None = ..., -) -> tuple[ - float, - cv2.typing.MatLike, -]: ... -@typing.overload -def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ... -@typing.overload -def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ... -@typing.overload -def trace(mtx: UMat) -> cv2.typing.Scalar: ... -@typing.overload -def transform( - src: cv2.typing.MatLike, - m: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def transform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def transpose(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... -@typing.overload -def transpose(src: UMat, dst: UMat | None = ...) -> UMat: ... -@typing.overload -def transposeND( - src: cv2.typing.MatLike, - order: typing.Sequence[int], - dst: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def transposeND(src: UMat, order: typing.Sequence[int], dst: UMat | None = ...) -> UMat: ... -@typing.overload -def triangulatePoints( - projMatr1: cv2.typing.MatLike, - projMatr2: cv2.typing.MatLike, - projPoints1: cv2.typing.MatLike, - projPoints2: cv2.typing.MatLike, - points4D: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def triangulatePoints( - projMatr1: UMat, - projMatr2: UMat, - projPoints1: UMat, - projPoints2: UMat, - points4D: UMat | None = ..., -) -> UMat: ... -@typing.overload -def undistort( - src: cv2.typing.MatLike, - cameraMatrix: cv2.typing.MatLike, - distCoeffs: cv2.typing.MatLike, - dst: cv2.typing.MatLike | None = ..., - newCameraMatrix: cv2.typing.MatLike | None = ..., -) -> cv2.typing.MatLike: ... -@typing.overload -def undistort( - src: UMat, - cameraMatrix: UMat, - distCoeffs: UMat, - dst: UMat | None = ..., - newCameraMatrix: UMat | None = ..., -) -> UMat: ... 
-@typing.overload
-def undistortImagePoints(
-    src: cv2.typing.MatLike,
-    cameraMatrix: cv2.typing.MatLike,
-    distCoeffs: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    arg1: cv2.typing.TermCriteria = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def undistortImagePoints(
-    src: UMat,
-    cameraMatrix: UMat,
-    distCoeffs: UMat,
-    dst: UMat | None = ...,
-    arg1: cv2.typing.TermCriteria = ...,
-) -> UMat: ...
-@typing.overload
-def undistortPoints(
-    src: cv2.typing.MatLike,
-    cameraMatrix: cv2.typing.MatLike,
-    distCoeffs: cv2.typing.MatLike,
-    dst: cv2.typing.MatLike | None = ...,
-    R: cv2.typing.MatLike | None = ...,
-    P: cv2.typing.MatLike | None = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def undistortPoints(
-    src: UMat,
-    cameraMatrix: UMat,
-    distCoeffs: UMat,
-    dst: UMat | None = ...,
-    R: UMat | None = ...,
-    P: UMat | None = ...,
-) -> UMat: ...
-@typing.overload
-def undistortPointsIter(
-    src: cv2.typing.MatLike,
-    cameraMatrix: cv2.typing.MatLike,
-    distCoeffs: cv2.typing.MatLike,
-    R: cv2.typing.MatLike,
-    P: cv2.typing.MatLike,
-    criteria: cv2.typing.TermCriteria,
-    dst: cv2.typing.MatLike | None = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def undistortPointsIter(
-    src: UMat,
-    cameraMatrix: UMat,
-    distCoeffs: UMat,
-    R: UMat,
-    P: UMat,
-    criteria: cv2.typing.TermCriteria,
-    dst: UMat | None = ...,
-) -> UMat: ...
-def useOpenVX() -> bool: ...
-def useOptimized() -> bool: ...
-@typing.overload
-def validateDisparity(
-    disparity: cv2.typing.MatLike,
-    cost: cv2.typing.MatLike,
-    minDisparity: int,
-    numberOfDisparities: int,
-    disp12MaxDisp: int = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def validateDisparity(
-    disparity: UMat,
-    cost: UMat,
-    minDisparity: int,
-    numberOfDisparities: int,
-    disp12MaxDisp: int = ...,
-) -> UMat: ...
-@typing.overload
-def vconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
-@typing.overload
-def vconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
-def waitKey(delay: int = ...) -> int: ...
-def waitKeyEx(delay: int = ...) -> int: ...
-@typing.overload
-def warpAffine(
-    src: cv2.typing.MatLike,
-    M: cv2.typing.MatLike,
-    dsize: cv2.typing.Size,
-    dst: cv2.typing.MatLike | None = ...,
-    flags: int = ...,
-    borderMode: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def warpAffine(
-    src: UMat,
-    M: UMat,
-    dsize: cv2.typing.Size,
-    dst: UMat | None = ...,
-    flags: int = ...,
-    borderMode: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> UMat: ...
-@typing.overload
-def warpPerspective(
-    src: cv2.typing.MatLike,
-    M: cv2.typing.MatLike,
-    dsize: cv2.typing.Size,
-    dst: cv2.typing.MatLike | None = ...,
-    flags: int = ...,
-    borderMode: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def warpPerspective(
-    src: UMat,
-    M: UMat,
-    dsize: cv2.typing.Size,
-    dst: UMat | None = ...,
-    flags: int = ...,
-    borderMode: int = ...,
-    borderValue: cv2.typing.Scalar = ...,
-) -> UMat: ...
-@typing.overload
-def warpPolar(
-    src: cv2.typing.MatLike,
-    dsize: cv2.typing.Size,
-    center: cv2.typing.Point2f,
-    maxRadius: float,
-    flags: int,
-    dst: cv2.typing.MatLike | None = ...,
-) -> cv2.typing.MatLike: ...
-@typing.overload
-def warpPolar(
-    src: UMat,
-    dsize: cv2.typing.Size,
-    center: cv2.typing.Point2f,
-    maxRadius: float,
-    flags: int,
-    dst: UMat | None = ...,
-) -> UMat: ...
-@typing.overload
-def watershed(image: cv2.typing.MatLike, markers: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
-@typing.overload
-def watershed(image: UMat, markers: UMat) -> UMat: ...
-@typing.overload
-def writeOpticalFlow(path: str, flow: cv2.typing.MatLike) -> bool: ...
-@typing.overload
-def writeOpticalFlow(path: str, flow: UMat) -> bool: ...
diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi
deleted file mode 100644
index 0bfcd316..00000000
--- a/typings/cv2/mat_wrapper/__init__.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-from typing import TypeAlias
-
-import numpy as np
-from _typeshed import Unused
-
-__all__: list[str] = []
-_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]]
-
-class Mat(_NDArray):
-    wrap_channels: bool | None
-
-    def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: Unused) -> _NDArray: ...
-    def __init__(self, arr: _NDArray, wrap_channels: bool = ...) -> None: ...
-    def __array_finalize__(self, obj: _NDArray | None) -> None: ...
diff --git a/typings/multiprocessing/connection.pyi b/typings/multiprocessing/connection.pyi
deleted file mode 100644
index 05821561..00000000
--- a/typings/multiprocessing/connection.pyi
+++ /dev/null
@@ -1,41 +0,0 @@
-# https://github.com/python/typeshed/blob/main/stdlib/multiprocessing/connection.pyi
-import sys
-from types import TracebackType
-from typing import Any, Generic, Self, SupportsIndex, TypeVar
-
-from _typeshed import ReadableBuffer
-
-_T1 = TypeVar("_T1")
-_T2 = TypeVar("_T2")
-
-class _ConnectionBase(Generic[_T1, _T2]):
-    def __init__(self, handle: SupportsIndex, readable: bool = True, writable: bool = True) -> None: ...
-    @property
-    def closed(self) -> bool: ...  # undocumented
-    @property
-    def readable(self) -> bool: ...  # undocumented
-    @property
-    def writable(self) -> bool: ...  # undocumented
-    def fileno(self) -> int: ...
-    def close(self) -> None: ...
-    def send_bytes(self, buf: ReadableBuffer, offset: int = 0, size: int | None = None) -> None: ...
-    def send(self, obj: _T1) -> None: ...
-    def recv_bytes(self, maxlength: int | None = None) -> bytes: ...
-    def recv_bytes_into(self, buf: Any, offset: int = 0) -> int: ...
-    def recv(self) -> _T2: ...
-    def poll(self, timeout: float | None = 0.0) -> bool: ...
-    def __enter__(self) -> Self: ...
-    def __exit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_value: BaseException | None,
-        exc_tb: TracebackType | None,
-    ) -> None: ...
-
-class Connection(_ConnectionBase[_T1, _T2]): ...
-
-if sys.platform == "win32":
-    class PipeConnection(_ConnectionBase[_T1, _T2]): ...
-    def Pipe(duplex=True) -> tuple[PipeConnection[_T1, _T2], PipeConnection[_T2, _T1]]: ...
-else:
-    def Pipe(duplex: bool = True) -> tuple[Connection[_T1, _T2], Connection[_T2, _T1]]: ...
diff --git a/typings/multiprocessing/py.typed b/typings/multiprocessing/py.typed
deleted file mode 100644
index b648ac92..00000000
--- a/typings/multiprocessing/py.typed
+++ /dev/null
@@ -1 +0,0 @@
-partial
diff --git a/typings/multiprocessing/test_cases/check_pipe_connections.py b/typings/multiprocessing/test_cases/check_pipe_connections.py
deleted file mode 100644
index a5b1cd2a..00000000
--- a/typings/multiprocessing/test_cases/check_pipe_connections.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from multiprocessing.connection import Pipe, PipeConnection
-
-# Less type-safe, but no extra variable. User could mix up send and recv types.
-# This should be improvable with PEP 695: Type Parameter Syntax in Python 3.12
-a: PipeConnection[str, int]
-b: PipeConnection[int, str]
-a, b = Pipe()
-
-# More type safe, but extra variable
-connections_wrong: tuple[
-    PipeConnection[str, int],
-    PipeConnection[str, int],
-] = Pipe()  # pyright: ignore[reportGeneralTypeIssues]
-connections_ok: tuple[PipeConnection[str, int], PipeConnection[int, str]] = Pipe()
-a, b = connections_ok
-
-a.send("test")
-a.send(0)  # pyright: ignore[reportGeneralTypeIssues]
-test1: str = b.recv()
-test2: int = b.recv()  # pyright: ignore[reportGeneralTypeIssues]
-
-b.send("test")  # pyright: ignore[reportGeneralTypeIssues]
-b.send(0)
-test3: str = a.recv()  # pyright: ignore[reportGeneralTypeIssues]
-test4: int = a.recv()
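The deleted test case above notes that PEP 695 type parameter syntax (Python 3.12) could improve these generic `Pipe` stubs. As a rough, hypothetical sketch of what that might look like — the `SendT`/`RecvT` names and this exact shape are my own illustration, not the removed stub or typeshed's actual API:

```py
# Hypothetical PEP 695 rewrite of the removed multiprocessing.connection stub.
# Requires Python 3.12+; SendT/RecvT are illustrative names only.
from types import TracebackType
from typing import Self

class _ConnectionBase[SendT, RecvT]:
    def send(self, obj: SendT) -> None: ...  # only SendT values may be sent
    def recv(self) -> RecvT: ...  # only RecvT values come back
    def close(self) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None: ...

class PipeConnection[SendT, RecvT](_ConnectionBase[SendT, RecvT]): ...

# The two ends swap their send/recv type parameters, as in the deleted stub.
def Pipe[SendT, RecvT](
    duplex: bool = True,
) -> tuple[PipeConnection[SendT, RecvT], PipeConnection[RecvT, SendT]]: ...
```

Even with the newer syntax, a bare `Pipe()` call gives the checker nothing to solve `SendT`/`RecvT` from, so the annotated-variable workarounds exercised in the deleted test case would likely still be needed; the main win is dropping the module-level `TypeVar` boilerplate.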