diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml deleted file mode 100644 index 5cc1b69..0000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ /dev/null @@ -1,97 +0,0 @@ -name: Bug report -description: Something is not working correctly. -title: "[BUG]" -labels: "bug, to be solved" - -body: - - type: checkboxes - attributes: - label: Get started - options: - - label: >- - I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). - required: true - - label: >- - I have confirmed that my problem could not be solved by the [troubleshooting](https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python/docs/troubleshooting/installation) section in the documentation. - required: true - - label: >- - I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). - required: true - - label: >- - I have confirmed that my issue is not duplicated with an existing issue. - required: true - - - type: textarea - attributes: - label: Description - description: >- - A clear and concise description of what the bug is. - validations: - required: true - - - type: textarea - attributes: - label: To Reproduce - description: >- - Steps to reproduce the behavior. Instead of describing the steps, you could also provide your codes related to the error here. - value: | - 1. Get package from '...' - 2. Then run '...' - 3. An error occurs. - - - type: textarea - attributes: - label: Traceback - description: >- - The python trackback of the bug. If there is no traceback, please describe (1) The expected behaviors. (2) The actual behaviors. - render: sh-session - - - type: textarea - attributes: - label: Behaviors - description: >- - If there is no traceback, please describe (1) The expected behaviors. (2) The actual behaviors. - value: | - 1. The expected behaviors: - 2. 
The actual behaviors: - - - type: textarea - attributes: - label: Screenshots - description: >- - If applicable, add screenshots to help explain your problem. - - - type: input - attributes: - label: OS - description: >- - e.g. Ubuntu 20.04, Debian 10, Windows 10 21H1 - validations: - required: true - - type: input - attributes: - label: Python version - description: >- - e.g. 3.8 - validations: - required: true - - type: input - attributes: - label: numpy version - description: >- - e.g. 1.21.1 - validations: - required: true - - type: input - attributes: - label: mpegCoder version - description: >- - e.g. 3.1.0 - validations: - required: true - - - type: textarea - attributes: - label: Additional context - description: >- - Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/docs_request.yml b/.github/ISSUE_TEMPLATE/docs_request.yml deleted file mode 100644 index 6df461f..0000000 --- a/.github/ISSUE_TEMPLATE/docs_request.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Docs request -description: Report a problem or a request for the docs. -title: "[Docs]" -labels: documentation, to be solved - -body: - - type: checkboxes - attributes: - label: Get started - options: - - label: >- - I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). - required: true - - label: >- - I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). - required: true - - label: >- - I have confirmed that my issue is not duplicated with an existing issue. - required: true - - - type: textarea - attributes: - label: Problem - description: >- - If you meet any problems with the documentation, please describe your problems here. - - - type: textarea - attributes: - label: Required feature - description: >- - If you need more explanations in the documentation, please describe your needs here. 
- - - type: input - attributes: - label: mpegCoder version - description: >- - e.g. 3.1.0 - validations: - required: true - - - type: textarea - attributes: - label: Additional context - description: >- - Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml deleted file mode 100644 index fdf4016..0000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Feature request -description: Suggest an idea for this project -title: "[Feature]" -labels: enhancement, to be solved - -body: - - type: checkboxes - attributes: - label: Get started - options: - - label: >- - I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). - required: true - - label: >- - I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). - required: true - - label: >- - I have confirmed that my issue is not duplicated with an existing issue. - required: true - - - type: textarea - attributes: - label: Problem - description: >- - If your feature request is related to a problem, please describe the problem clearly and concisely. - - - type: textarea - attributes: - label: Required feature - description: >- - A clear and concise description of what you want to happen. - validations: - required: true - - - type: textarea - attributes: - label: Alternative solution - description: >- - A clear and concise description of any alternative solutions or features you've considered. - - - type: input - attributes: - label: mpegCoder version - description: >- - e.g. 3.1.0 - validations: - required: true - - - type: textarea - attributes: - label: Additional context - description: >- - Add any other context about the problem here. 
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 2701a3e..0000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,33 +0,0 @@ -# Pull request - -## Get started - -- [ ] I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). -- [ ] I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). -- [ ] I have confirmed that my pull request (PR) is not duplicated with an existing PR. -- [ ] I have confirmed that my pull request (PR) passes the testing workflow of the project. - -## Description - -Describe what you have done with this PR. List any dependencies that are required for this change. - -If your PR is designed for an issue, please refer to the issue by the following example: - -Fixes # (issue) - -## Updated report - -Please summarize your modifications as an itemized report. - -1. Update ... -2. Add ... - -## Information - -Please provide the following information about your PR: - -- `mpegCoder` version: - -## Additional context - -Add any other context about the problem here. diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md deleted file mode 100644 index 2701a3e..0000000 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +++ /dev/null @@ -1,33 +0,0 @@ -# Pull request - -## Get started - -- [ ] I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). -- [ ] I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). -- [ ] I have confirmed that my pull request (PR) is not duplicated with an existing PR. -- [ ] I have confirmed that my pull request (PR) passes the testing workflow of the project. 
- -## Description - -Describe what you have done with this PR. List any dependencies that are required for this change. - -If your PR is designed for an issue, please refer to the issue by the following example: - -Fixes # (issue) - -## Updated report - -Please summarize your modifications as an itemized report. - -1. Update ... -2. Add ... - -## Information - -Please provide the following information about your PR: - -- `mpegCoder` version: - -## Additional context - -Add any other context about the problem here. diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.yml b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.yml deleted file mode 100644 index 8b55cb6..0000000 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Pull request -description: Send a pull request (PR) for this project. -title: "[PR]" - -body: - - type: checkboxes - attributes: - label: Get started - options: - - label: >- - I have read [Contributing guidelines](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CONTRIBUTING.md). - required: true - - label: >- - I agree to follow the [Code of Conduct](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md). - required: true - - label: >- - I have confirmed that my pull request (PR) is not duplicated with an existing PR. - required: true - - label: >- - I have confirmed that my pull request (PR) passes the testing workflow of the project. - required: true - - - type: textarea - attributes: - label: Description - description: >- - Describe what you have done with this PR. - - - type: textarea - attributes: - label: Updated report - description: >- - Summarize your modifications as itemized report. - value: | - 1. Update ... - 2. Add ... - - - type: input - attributes: - label: mpegCoder version - description: >- - e.g. 
3.1.0 - validations: - required: true - - - type: textarea - attributes: - label: Additional context - description: >- - Add any other context about the problem here. diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 0000000..e602772 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,48 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +jobs: + deploy: + + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest] + python: ['3.6', '3.7', '3.8', '3.9', '3.10'] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + pip install -r requirements.txt + - name: Build package (Linux) + run: | + python setup.py bdist_wheel --plat-name manylinux1_x86_64 + if: runner.os == 'Linux' + - name: Build package (Windows) + run: | + python setup.py bdist_wheel + if: runner.os == 'Windows' + - name: Publish + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} + run: | + twine upload dist/* diff --git a/.gitignore b/.gitignore index 8c1a3d3..00e650d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,9 @@ # Others include/* lib/* +/dependencies/* .vs/* +.vscode/* *.pdb *.pyd *.ipdb @@ -12,14 +14,7 @@ lib/* *.lastbuildstate unsuccessfulbuild /MpegCoder/x64/ - -# Compressed files 
-*.tar.xz -*.tar.gz -*.tar.bz2 -*.7z -*.zip -*.rar +/mpegCoder/* # Prerequisites *.d @@ -53,3 +48,127 @@ unsuccessfulbuild *.exe *.out *.app + + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. 
+#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c722a8..41e68f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,13 +2,39 @@ ## Update Report -### V3.2.0 update report: +### V3.2.4 @ 4/24/2022 -1. Upgrade to `FFMpeg 5.0` Version. +1. Fix a bug when `tqdm<4.40.0` is installed. Previously, this problem should not trigger if `tqdm>4.40.0` is installed, or `tqdm` is not installed. + +2. Fix the same bug (mentioned by item 1) in the `setup.py` script. + +3. Add change logs. + +### V3.2.3 @ 4/22/2022 + +1. Fix a severe bug that causes the dependencies to be downloaded repeatedly. + +### V3.2.2 @ 4/22/2022 + +1. Fix a typo: `mpegCoder.__verion__` -> `mpegCoder.__version__`. + +### V3.2.1 @ 4/22/2022 + +1. Fix an issue caused by the missing dependency `libcrypto.so.1.1`. This fixture is only required by the Linux version. + +2. Format the PyPI release script. + +### V3.2.0 @ 4/8/2022 + +1. Upgrade to `FFMpeg 5.0` version. 2. Fix the const assignment bug caused by the codec configuration method. -### V3.1.0 update report: +3. (Only for Linux) Upgrade the dependencies of FFMpeg to the newest versions ([#4](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/issues/4)). + +4. (About PyPI) Change the behavior of the PYPI `.whl` release. Now the dependencies will not be packed into `.whl` directly. When users `import mpegCoder` for the first time, the dependency will be automatically downloaded. Please ensure that you have the authority to modify the `site-packages` folder when you import `mpegCoder` for the first time. + +### V3.1.0 @ 7/23/2021 1. 
Support `str()` type for all string arguments. @@ -22,7 +48,7 @@ 6. Fix typos in docstrings. -### V3.0.0 update report: +### V3.0.0 update report 1. Fix a severe memory leaking bugs when using `AVPacket`. @@ -42,19 +68,19 @@ 9. Add a quick script for fetching the `FFMpeg` dependencies. -### V2.05 update report: +### V2.05 update report 1. Fix a severe bug that causes the memory leak when using `MpegClient`.This bug also exists in `MpegDecoder`, but it seems that the bug would not cause memory leak in that case. (Although we have also fixed it now.) 2. Upgrade to `FFMpeg 4.0` Version. -### V2.01 update report: +### V2.01 update report 1. Fix a bug that occurs when the first received frame may has a PTS larger than zero. 2. Enable the project produce the newest `FFMpeg 3.4.2` version and use `Python 3.6.4`, `numpy 1.14`. -### V2.0 update report: +### V2.0 update report 1. Revise the bug of the encoder which may cause the stream duration is shorter than the real duration of the video in some not advanced media players. @@ -62,31 +88,31 @@ 3. Provide a complete version of client, which could demux the video stream from a server in any network protocol. -### V1.8 update report: +### V1.8 update report 1. Provide options `(widthDst, heightDst)` to let `MpegDecoder` could control the output size manually. To ensure the option is valid, we must use the method `setParameter` before `FFmpegSetup`. Now you could use this options to get a rescaled output directly: ```python - d = mpegCoder.MpegDecoder() # initialize - d.setParameter(widthDst=400, heightDst=300) # noted that these options must be set before 'FFmpegSetup'! - d.FFmpegSetup(b'i.avi') # the original video size would not influence the output - print(d) # examine the parameters. 
You could also get the original video size by 'getParameter' - d.ExtractFrame(0, 100) # get 100 frames with 400x300 + d = mpegCoder.MpegDecoder() # initialize + d.setParameter(widthDst=400, heightDst=300) # noted that these options must be set before 'FFmpegSetup'! + d.FFmpegSetup(b'i.avi') # the original video size would not influence the output + print(d) # examine the parameters. You could also get the original video size by 'getParameter' + d.ExtractFrame(0, 100) # get 100 frames with 400x300 ``` In another example, the set optional parameters could be inherited by encoder, too: ```python - d.setParameter(widthDst=400, heightDst=300) # set optional parameters - ... - e.setParameter(decoder=d) # the width/height would inherit from widthDst/heightDst rather than original width/height of the decoder. + d.setParameter(widthDst=400, heightDst=300) # set optional parameters + ... + e.setParameter(decoder=d) # the width/height would inherit from widthDst/heightDst rather than original width/height of the decoder. ``` Noted that we do not provide `widthDst`/`heightDst` in `getParameter`, because these 2 options are all set by users. There is no need to get them from the video metadata. 2. Optimize some realization of Decoder so that its efficiency could be improved. -### V1.7-linux update report: +### V1.7-linux update report Thanks to God, we succeed in this work! @@ -103,14 +129,14 @@ If you want, you could install `ffmpeg` on Linux: Here are some instructions 2. Use these steps to install ffmpeg instead of provided commands on the above site. 
```Bash - $ git clone https://git.ffmpeg.org/ffmpeg.git - $ cd ffmpeg - $ ./configure --prefix=host --enable-gpl --enable-libx264 --enable-libx265 --enable-shared --disable-static --disable-doc - $ make - $ make install +git clone https://git.ffmpeg.org/ffmpeg.git +cd ffmpeg +./configure --prefix=host --enable-gpl --enable-libx264 --enable-libx265 --enable-shared --disable-static --disable-doc +make +make install ``` -### V1.7 update report: +### V1.7 update report 1. Realize the encoder totally. @@ -118,18 +144,18 @@ If you want, you could install `ffmpeg` on Linux: Here are some instructions 3. Fix bugs in initialize functions. -### V1.5 update report: +### V1.5 update report 1. Provide an incomplete version of encoder, which could encode frames as a video stream that could not be played by player. -### V1.4 update report: +### V1.4 update report 1. Fix a severe bug of the decoder, which causes the memory collapsed if decoding a lot of frames. -### V1.2 update report: +### V1.2 update report 1. Use numpy array to replace the native pyList, which improves the speed significantly. -### V1.0 update report: +### V1.0 update report 1. Provide the decoder which could decode videos in arbitrary formats and arbitrary coding. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 4aa8000..0000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. 
- -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -cainmagi@gmail.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. 
Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index eddc2b7..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing to mpegCoder - -Thank you for your interest in contributing to `mpegCoder`! We are accepting pull -requests in any time. - -As a reminder, all contributors are expected to follow our [Code of Conduct][coc]. - -[coc]: https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/CODE_OF_CONDUCT.md - -## Contributing to the package - -### Installation - -Please [fork] this project as your own repository, and create a sub-branch based on any branch in this project. The new branch name could be a short description of the implemented new feature. - -After that, clone your repository by - -```shell -git clone -b --single-branch https://github.com//FFmpeg-Encoder-Decoder-for-Python.git mpegCoder -``` - -In some cases, you may need to install some dependencies. Please follow the specific instructions for compling `mpegCoder`. - -### Debugging - -We have not provided any testing scripts now. 
I am glad to accept the help from anyone who is willing to writing the testing scripts for this project. - -### Sending pull requests - -After you finish your works, please send a new request, and compare your branch with the target branch in `mpegCoder`. You could explain your works concisely in the pull request description. You are not required to add the updating reports in the repository, or add the documentation. I could take over these works based on your description. - -## Contributing to docs - -If you want to contribute to docs, please fork the [`docs`](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/docs) branch, and clone it - -```shell -git clone -b docs --single-branch https://github.com//FFmpeg-Encoder-Decoder-for-Python.git mpegCoder-docs -``` - -You need to install `nodejs` and `yarn` first. We suggest to create an isolated conda environment: - -```shell -conda create -n docs -c conda-forge git python=3.9 nodejs=15.14.0 yarn=1.22.10 -``` - -Then you could initialize the docs project by - -```shell -cd mpegCoder-docs -yarn install -``` - -You could start the local debugging by - -```shell -yarn start -``` - -After you finish your works, you could also send a pull request. 
diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/MpegCoder.sln b/MpegCoder.sln deleted file mode 100644 index 7215572..0000000 --- a/MpegCoder.sln +++ /dev/null @@ -1,31 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.31410.357 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MpegCoder", "MpegCoder\MpegCoder.vcxproj", "{57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Debug|x64.ActiveCfg = Debug|x64 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Debug|x64.Build.0 = Debug|x64 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Debug|x86.ActiveCfg = Debug|Win32 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Debug|x86.Build.0 = Debug|Win32 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Release|x64.ActiveCfg = Release|x64 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Release|x64.Build.0 = Release|x64 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Release|x86.ActiveCfg = Release|Win32 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044}.Release|x86.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {C950261D-8B64-4B1B-8275-B7B3F8F58C6E} - EndGlobalSection -EndGlobal diff --git a/MpegCoder/MpegBase.cpp b/MpegCoder/MpegBase.cpp deleted file mode 100644 index 55e76f7..0000000 --- a/MpegCoder/MpegBase.cpp +++ /dev/null @@ -1,100 +0,0 @@ -#include "stdafx.h" -#include "MpegBase.h" - -// Global functions. 
-const string cmpc::av_make_error_string2_cpp(int errnum) { - char errbuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE); - string strerrbuf = errbuf; - return strerrbuf; -} - -const string cmpc::av_ts_make_string_cpp(int64_t ts) { - char tsstrbuf[AV_TS_MAX_STRING_SIZE]; - av_ts_make_string(tsstrbuf, ts); - string strtsstrbuf = tsstrbuf; - return strtsstrbuf; -} - -const string cmpc::av_ts_make_time_string_cpp(int64_t ts, AVRational* tb) { - char tsstrbuf[AV_TS_MAX_STRING_SIZE]; - av_ts_make_time_string(tsstrbuf, ts, tb); - string strtsstrbuf = tsstrbuf; - return strtsstrbuf; -} - -// CharList implementation. -cmpc::CharList::CharList(void) : data() { -} - -cmpc::CharList::CharList(const std::vector& args) : data() { - set(args); -} - -cmpc::CharList::CharList(const std::vector&& args) noexcept : - data(args) { -} - -cmpc::CharList::~CharList(void) { - clear(); -} - -cmpc::CharList::CharList(const CharList& ref) : data() { - set(ref.data); -} - -cmpc::CharList& cmpc::CharList::operator=(const CharList& ref) { - if (this != &ref) { - set(ref.data); - } - return *this; -} - -cmpc::CharList::CharList(CharList&& ref) noexcept : - data(std::move(ref.data)) { -} - -cmpc::CharList& cmpc::CharList::operator=(CharList&& ref) noexcept { - if (this != &ref) { - set(std::move(ref.data)); - } - return *this; -} - -cmpc::CharList& cmpc::CharList::operator=(const std::vector& args) { - set(args); - return *this; -} - -cmpc::CharList& cmpc::CharList::operator=(std::vector&& args) noexcept { - set(args); - return *this; -} - -void cmpc::CharList::set(const std::vector& args) { - data.clear(); - for (auto it = args.begin(); it != args.end(); ++it) { - string new_str(*it); - data.push_back(new_str); - } -} - -void cmpc::CharList::set(std::vector&& args) noexcept { - data = args; -} - -void cmpc::CharList::clear() { - data.clear(); -} - -std::shared_ptr cmpc::CharList::c_str() { - std::shared_ptr pointer(new const char* [data.size() + 1], 
std::default_delete()); - auto p_cur = pointer.get(); - for (auto it = data.begin(); it != data.end(); ++it) { - *p_cur = it->c_str(); - p_cur++; - } - *p_cur = nullptr; - return pointer; -} - diff --git a/MpegCoder/MpegBase.h b/MpegCoder/MpegBase.h deleted file mode 100644 index 74e43bc..0000000 --- a/MpegCoder/MpegBase.h +++ /dev/null @@ -1,116 +0,0 @@ -#ifndef MPEGBASE_H_INCLUDED -#define MPEGBASE_H_INCLUDED - -#define MPEGCODER_EXPORTS -#ifdef MPEGCODER_EXPORTS - #define MPEGCODER_API __declspec(dllexport) -#else - #define MPEGCODER_API __declspec(dllimport) -#endif - -#define FFMPG3_4 -#define FFMPG4_0 -#define FFMPG4_4 -#define FFMPG5_0 - -#define MPEGCODER_CURRENT_VERSION "3.2.0" - -#define STREAM_PIX_FMT AVPixelFormat::AV_PIX_FMT_YUV420P /* default pix_fmt */ - -#define SCALE_FLAGS SWS_BICUBIC -//SWS_BILINEAR - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -using std::string; -using std::cerr; -using std::cout; -using std::endl; -using std::ostream; - -namespace cmpc { - extern "C" - { - #include "libavcodec/avcodec.h" - #include "libavformat/avformat.h" - #include "libswscale/swscale.h" - #include "libavutil/imgutils.h" - #include "libavutil/samplefmt.h" - #include "libavutil/timestamp.h" - #include "libavutil/opt.h" - #include "libavutil/avassert.h" - #include "libavutil/channel_layout.h" - #include "libavutil/mathematics.h" - #include "libavutil/time.h" - #include "libswresample/swresample.h" - } -} - -#ifdef __cplusplus -namespace cmpc { - const string av_make_error_string2_cpp(int errnum); - #undef av_err2str - #define av_err2str(errnum) av_make_error_string2_cpp(errnum) - const string av_ts_make_string_cpp(int64_t ts); - #undef av_ts2str - #define av_ts2str(ts) av_ts_make_string_cpp(ts) - const string av_ts_make_time_string_cpp(int64_t ts, AVRational* tb); - #undef av_ts2timestr - #define av_ts2timestr(ts, tb) av_ts_make_time_string_cpp(ts, tb) -} -#endif // __cplusplus - 
-namespace cmpc { - // a wrapper around a single output AVStream - typedef struct _OutputStream { - AVStream* st; - AVCodecContext* enc; - - /* pts of the next frame that will be generated */ - int64_t next_frame; - - AVFrame* frame; - AVFrame* tmp_frame; - - struct SwsContext* sws_ctx; - } OutputStream; - - // A wrapper of the char *[] - class CharList { - public: - CharList(void); // Constructor. - CharList(const std::vector& args); // Copy constructor (string ver). - CharList(const std::vector&& args) noexcept; // Move constructor (string ver). - ~CharList(void); // 3-5 law. Destructor. - CharList(const CharList& ref); // Copy constructor. - CharList& operator=(const CharList& ref); // Copy assignment operator. - CharList(CharList&& ref) noexcept; // Move constructor. - CharList& operator=(CharList&& ref) noexcept; // Move assignment operator. - CharList& operator=(const std::vector& args); // Copy assignment operator (string ver). - CharList& operator=(std::vector&& args) noexcept; // Move assignment operator (string ver). - void set(const std::vector& args); // Set strings as data. - void set(std::vector&& args) noexcept; // Set strings as data (move). - void clear(); // clear all data. - std::shared_ptr c_str(); // Equivalent conversion for char ** - private: - std::vector data; - }; -} - -// compatibility with newer API -#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1) - #define av_frame_alloc avcodec_alloc_frame - #define av_frame_free avcodec_free_frame -#endif - -#endif diff --git a/MpegCoder/MpegCoder.cpp b/MpegCoder/MpegCoder.cpp deleted file mode 100644 index 4f9abf6..0000000 --- a/MpegCoder/MpegCoder.cpp +++ /dev/null @@ -1,2008 +0,0 @@ -// MpegCoder.cpp: 定义 DLL 应用程序的导出函数。 -// - -#include "stdafx.h" - -#define NO_IMPORT_ARRAY -#define PY_ARRAY_UNIQUE_SYMBOL MPEGARRAY_API -#include -#include "MpegCoder.h" -#include "MpegStreamer.h" - -int8_t cmpc::__dumpControl = 1; - -// 这是已导出类的构造函数。 -// 有关类定义的信息,请参阅 MpegCoder.h - -// Constructors. 
-cmpc::CMpegDecoder::CMpegDecoder(void) : - videoPath(), width(0), height(0), widthDst(0), heightDst(0), - PPixelFormat(STREAM_PIX_FMT), PFormatCtx(nullptr), PCodecCtx(nullptr), PVideoStream(nullptr), - PVideoStreamIDX(-1), PVideoFrameCount(0), RGBbuffer(nullptr), PswsCtx(nullptr), - _str_codec(), _duration(0), _predictFrameNum(0), currentGOPTSM(0), EndofGOP(false), - nthread(0), refcount(1) { - /* Enable or disable frame reference counting. You are not supposed to support - * both paths in your application but pick the one most appropriate to your - * needs. Look for the use of refcount in this example to see what are the - * differences of API usage between them. */ - // refcount = 1; -} - -void cmpc::CMpegDecoder::meta_protected_clear(void) { - auto protectWidth = widthDst; - auto protectHeight = heightDst; - auto protectNthread = nthread; - clear(); - widthDst = protectWidth; - heightDst = protectHeight; - nthread = protectNthread; -} - -void cmpc::CMpegDecoder::clear(void) { - width = height = 0; - widthDst = heightDst = 0; - PVideoStreamIDX = -1; - PVideoFrameCount = 0; - nthread = 0; - _duration = 0; - _predictFrameNum = 0; - currentGOPTSM = 0; - EndofGOP = false; - PPixelFormat = STREAM_PIX_FMT; - _str_codec.clear(); - //videoPath.clear(); - - PVideoStream = nullptr; - if (PswsCtx) { - sws_freeContext(PswsCtx); - PswsCtx = nullptr; - } - if (RGBbuffer) { - av_free(RGBbuffer); - RGBbuffer = nullptr; - } - if (PCodecCtx) { - avcodec_free_context(&PCodecCtx); - PCodecCtx = nullptr; - } - if (PFormatCtx) { - avformat_close_input(&PFormatCtx); - PFormatCtx = nullptr; - } - refcount = 1; -} - -cmpc::CMpegDecoder::~CMpegDecoder() { - clear(); -} - -cmpc::CMpegDecoder::CMpegDecoder(const CMpegDecoder& ref) : - videoPath(ref.videoPath), width(0), height(0), widthDst(ref.widthDst), heightDst(ref.heightDst), - PPixelFormat(ref.PPixelFormat), PFormatCtx(nullptr), PCodecCtx(nullptr), PVideoStream(nullptr), - PVideoStreamIDX(-1), PVideoFrameCount(0), RGBbuffer(nullptr), 
PswsCtx(nullptr), - _str_codec(), _duration(0), _predictFrameNum(0), currentGOPTSM(0), EndofGOP(false), - nthread(ref.nthread), refcount(ref.refcount) { - if (!FFmpegSetup()) { - clear(); - } -} - -cmpc::CMpegDecoder& cmpc::CMpegDecoder::operator=(const CMpegDecoder& ref) { - if (this != &ref) { - videoPath.assign(ref.videoPath); - width = 0; - height = 0; - widthDst = ref.widthDst; - heightDst = ref.heightDst; - PPixelFormat = ref.PPixelFormat; - PFormatCtx = nullptr; - PCodecCtx = nullptr; - PVideoStream = nullptr; - PVideoStreamIDX = -1; - PVideoFrameCount = 0; - RGBbuffer = nullptr; - PswsCtx = nullptr; - _str_codec.clear(); - _duration = 0.0; - _predictFrameNum = 0; - currentGOPTSM = 0; - EndofGOP = false; - nthread = ref.nthread; - refcount = ref.refcount; - if (!FFmpegSetup()) { - clear(); - } - } - return *this; -} - -cmpc::CMpegDecoder::CMpegDecoder(CMpegDecoder&& ref) noexcept : - videoPath(std::move(ref.videoPath)), width(ref.width), height(ref.height), - widthDst(ref.widthDst), heightDst(ref.heightDst), PPixelFormat(ref.PPixelFormat), - PFormatCtx(ref.PFormatCtx), PCodecCtx(ref.PCodecCtx), PVideoStream(ref.PVideoStream), - PVideoStreamIDX(ref.PVideoStreamIDX), PVideoFrameCount(ref.PVideoFrameCount), - RGBbuffer(ref.RGBbuffer), PswsCtx(ref.PswsCtx), _str_codec(std::move(ref._str_codec)), - _duration(ref._duration), _predictFrameNum(ref._predictFrameNum), - currentGOPTSM(ref.currentGOPTSM), EndofGOP(ref.EndofGOP), - nthread(ref.nthread), refcount(ref.refcount) { - ref.PFormatCtx = nullptr; - ref.PCodecCtx = nullptr; - ref.PVideoStream = nullptr; - ref.PswsCtx = nullptr; -} - -cmpc::CMpegDecoder& cmpc::CMpegDecoder::operator=(CMpegDecoder&& ref) noexcept { - if (this != &ref) { - videoPath.assign(std::move(ref.videoPath)); - width = ref.width; - height = ref.height; - widthDst = ref.widthDst; - heightDst = ref.heightDst; - PPixelFormat = ref.PPixelFormat; - PFormatCtx = ref.PFormatCtx; - PCodecCtx = ref.PCodecCtx; - PVideoStream = ref.PVideoStream; - 
PVideoStreamIDX = ref.PVideoStreamIDX; - PVideoFrameCount = ref.PVideoFrameCount; - RGBbuffer = ref.RGBbuffer; - PswsCtx = ref.PswsCtx; - _str_codec.assign(std::move(ref._str_codec)); - _duration = ref._duration; - _predictFrameNum = ref._predictFrameNum; - currentGOPTSM = ref.currentGOPTSM; - EndofGOP = ref.EndofGOP; - nthread = ref.nthread; - refcount = ref.refcount; - ref.PFormatCtx = nullptr; - ref.PCodecCtx = nullptr; - ref.PVideoStream = nullptr; - ref.RGBbuffer = nullptr; - ref.PswsCtx = nullptr; - } - return *this; -} - -void cmpc::CMpegDecoder::resetPath(string inVideoPath) { - videoPath.assign(inVideoPath); -} - -void cmpc::CMpegDecoder::setGOPPosition(int64_t inpos) { - currentGOPTSM = __FrameToPts(inpos); - EndofGOP = false; -} - -void cmpc::CMpegDecoder::setGOPPosition(double inpos) { - currentGOPTSM = __TimeToPts(inpos); - EndofGOP = false; -} - -int cmpc::CMpegDecoder::_open_codec_context(int& stream_idx, AVCodecContext*& dec_ctx, \ - AVFormatContext* PFormatCtx, enum AVMediaType type) { // Search the correct decoder, and make the configurations. - auto ret = av_find_best_stream(PFormatCtx, type, -1, -1, nullptr, 0); - if (ret < 0) { - cerr << "Could not find " << av_get_media_type_string(type) << \ - " stream in input file '" << videoPath << "'" << endl; - return ret; - } - else { - auto stream_index = ret; - auto st = PFormatCtx->streams[stream_index]; // The AVStream object. - - /* find decoder for the stream */ - auto dec = avcodec_find_decoder(st->codecpar->codec_id); // Decoder (AVCodec). - if (!dec) { - cerr << "Failed to find " << av_get_media_type_string(type) << " codec" << endl; - return AVERROR(EINVAL); - } - _str_codec.assign(dec->name); - - /* Allocate a codec context for the decoder */ - auto dec_ctx_ = avcodec_alloc_context3(dec); // Decoder context (AVCodecContext). 
- if (!dec_ctx_) { - cerr << "Failed to allocate the " << av_get_media_type_string(type) << " codec context" << endl; - return AVERROR(ENOMEM); - } - - if (nthread > 0) { - dec_ctx_->thread_count = nthread; - } - - /* Copy codec parameters from input stream to output codec context */ - if ((ret = avcodec_parameters_to_context(dec_ctx_, st->codecpar)) < 0) { - cerr << "Failed to copy " << av_get_media_type_string(type) << \ - " codec parameters to decoder context" << endl; - return ret; - } - - /* Init the decoders, with or without reference counting */ - AVDictionary* opts = nullptr; // The uninitialized argument dictionary. - av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0); - if ((ret = avcodec_open2(dec_ctx_, dec, &opts)) < 0) { - cerr << "Failed to open " << av_get_media_type_string(type) << " codec" << endl; - return ret; - } - dec_ctx = dec_ctx_; - stream_idx = stream_index; - } - return 0; -} - -bool cmpc::CMpegDecoder::FFmpegSetup(string inVideoPath) { - resetPath(inVideoPath); - return FFmpegSetup(); -} - -bool cmpc::CMpegDecoder::FFmpegSetup() { // Open the video file, and search the correct codec. 
- meta_protected_clear(); - - /* open input file, and allocate format context */ - if (avformat_open_input(&PFormatCtx, videoPath.c_str(), nullptr, nullptr) < 0) { - cerr << "Could not open source file " << videoPath << endl; - return false; - } - - /* retrieve stream information */ - if (avformat_find_stream_info(PFormatCtx, nullptr) < 0) { - cerr << "Could not find stream information" << endl; - return false; - } - - if (_open_codec_context(PVideoStreamIDX, PCodecCtx, PFormatCtx, AVMediaType::AVMEDIA_TYPE_VIDEO) >= 0) { - PVideoStream = PFormatCtx->streams[PVideoStreamIDX]; - auto time_base = PVideoStream->time_base; - auto frame_base = PVideoStream->avg_frame_rate; - - /* allocate image where the decoded image will be put */ - width = PCodecCtx->width; - height = PCodecCtx->height; - PPixelFormat = PCodecCtx->pix_fmt; - _duration = static_cast(PVideoStream->duration) / static_cast(time_base.den) * static_cast(time_base.num); - _predictFrameNum = av_rescale(static_cast(_duration * 0xFFFF), frame_base.num, frame_base.den) / 0xFFFF; - } - - /* dump input information to stderr */ - auto dump_level = av_log_get_level(); - if (dump_level >= AV_LOG_INFO) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 0); - } - - if (!PVideoStream) { // Check whether the video stream is opened correctly. - cerr << "Could not find audio or video stream in the input, aborting" << endl; - clear(); - return false; - } - - // Initialize SWS context for software scaling. 
- if (widthDst > 0 && heightDst > 0) { - PswsCtx = sws_getContext(width, height, PPixelFormat, widthDst, heightDst, AVPixelFormat::AV_PIX_FMT_RGB24, SCALE_FLAGS, nullptr, nullptr, nullptr); - auto numBytes = av_image_get_buffer_size(AVPixelFormat::AV_PIX_FMT_RGB24, widthDst, heightDst, 1); - RGBbuffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); - } - else { - PswsCtx = sws_getContext(width, height, PPixelFormat, width, height, AVPixelFormat::AV_PIX_FMT_RGB24, SCALE_FLAGS, nullptr, nullptr, nullptr); - auto numBytes = av_image_get_buffer_size(AVPixelFormat::AV_PIX_FMT_RGB24, width, height, 1); - RGBbuffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); - } - return true; -} - -void cmpc::CMpegDecoder::dumpFormat() { - if ((!videoPath.empty()) && PFormatCtx) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 0); - } - else { - cerr << "Still need to FFmpegSetup()" << endl; - } -} - -void cmpc::CMpegDecoder::setParameter(string keyword, void* ptr) { - if (keyword.compare("widthDst") == 0) { - auto ref = reinterpret_cast(ptr); - widthDst = *ref; - } - else if (keyword.compare("heightDst") == 0) { - auto ref = reinterpret_cast(ptr); - heightDst = *ref; - } - else if (keyword.compare("nthread") == 0) { - auto ref = reinterpret_cast(ptr); - if (PCodecCtx) { - PCodecCtx->thread_count = *ref; - } - nthread = *ref; - } -} - -PyObject* cmpc::CMpegDecoder::getParameter() { - auto res = PyDict_New(); - string key; - PyObject* val = nullptr; - // Fill the values. 
- key.assign("videoPath"); - val = Py_BuildValue("y", videoPath.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("codecName"); - val = Py_BuildValue("y", _str_codec.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PCodecCtx) { - key.assign("bitRate"); - val = Py_BuildValue("L", PCodecCtx->bit_rate); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("GOPSize"); - val = Py_BuildValue("i", PCodecCtx->gop_size); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("maxBframe"); - val = Py_BuildValue("i", PCodecCtx->max_b_frames); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("nthread"); - val = Py_BuildValue("i", PCodecCtx->thread_count); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - else { - key.assign("nthread"); - val = Py_BuildValue("i", nthread); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (widthDst > 0) { - key.assign("widthDst"); - val = Py_BuildValue("i", widthDst); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (heightDst > 0) { - key.assign("heightDst"); - val = Py_BuildValue("i", heightDst); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - key.assign("width"); - val = Py_BuildValue("i", width); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("height"); - val = Py_BuildValue("i", height); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PVideoStream) { - key.assign("frameRate"); - auto& frame_rate = PVideoStream->avg_frame_rate; - val = Py_BuildValue("(ii)", frame_rate.num, frame_rate.den); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - return res; -} - -PyObject* cmpc::CMpegDecoder::getParameter(string keyword) { - if (keyword.compare("videoPath") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(videoPath.c_str(), 
static_cast(videoPath.size())); - } - else if (keyword.compare("width") == 0) { - return Py_BuildValue("i", width); - } - else if (keyword.compare("height") == 0) { - return Py_BuildValue("i", height); - } - else if (keyword.compare("frameCount") == 0) { - return Py_BuildValue("i", PVideoFrameCount); - } - else if (keyword.compare("coderName") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(_str_codec.c_str(), static_cast(_str_codec.size())); - } - else if (keyword.compare("duration") == 0) { - return Py_BuildValue("d", _duration); - } - else if (keyword.compare("estFrameNum") == 0) { - return Py_BuildValue("L", _predictFrameNum); - } - else if (keyword.compare("avgFrameRate") == 0) { - auto frame_base = PVideoStream->avg_frame_rate; - double frameRate = static_cast(frame_base.num) / static_cast(frame_base.den); - return Py_BuildValue("d", frameRate); - } - else if (keyword.compare("nthread") == 0) { - if (PCodecCtx) { - return Py_BuildValue("i", PCodecCtx->thread_count); - } - else { - return Py_BuildValue("i", nthread); - } - } - else { - Py_RETURN_NONE; - } -} - -// The flush packet is a non-NULL packet with size 0 and data NULL -int cmpc::CMpegDecoder::__avcodec_decode_video2(AVCodecContext* avctx, AVFrame* frame, bool& got_frame, AVPacket* pkt) { - int ret; - - got_frame = false; - - if (pkt) { - ret = avcodec_send_packet(avctx, pkt); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - //cout << ret << ", " << AVERROR(EAGAIN) << ", " << AVERROR_EOF << endl; - return ret == AVERROR_EOF ? 
0 : ret; - } - } - - ret = avcodec_receive_frame(avctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) - return ret; - if (ret >= 0) - got_frame = true; - - //cout << ret << ", " << AVERROR(EAGAIN) << ", " << AVERROR_EOF << endl; - - return 0; -} - -int cmpc::CMpegDecoder::_SaveFrame(PyObject* PyFrameList, AVFrame*& frame, AVFrame*& frameRGB, AVPacket*& pkt, bool& got_frame, int64_t minPTS, bool& processed, int cached) { - int ret = 0; - int decoded = pkt->size; - PyObject* OneFrame = nullptr; - - got_frame = false; - - if (pkt->stream_index == PVideoStreamIDX) { - /* decode video frame */ - ret = __avcodec_decode_video2(PCodecCtx, frame, got_frame, pkt); - if (ret < 0) { - cout << "Error decoding video frame (" << av_err2str(ret) << ")" << endl; - return ret; - } - - if (got_frame) { - - if (frame->pts < minPTS) { - //cout << frame->pts << " < " << minPTS << endl; - processed = false; - return decoded; - } - - if (frame->width != width || frame->height != height || - frame->format != PPixelFormat) { - /* To handle this change, one could call av_image_alloc again and - * decode the following frames into another rawvideo file. */ - cout << "Error: Width, height and pixel format have to be " - "constant in a rawvideo file, but the width, height or " - "pixel format of the input video changed:\n" - "old: width = " << width << ", height = " << height << ", format = " - << av_get_pix_fmt_name(PPixelFormat) << endl << - "new: width = " << frame->width << ", height = " << frame->height << ", format = " - << av_get_pix_fmt_name(static_cast(frame->format)) << endl; - return -1; - } - - - PVideoFrameCount++; - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "video_frame" << (cached ? 
"(cached)" : "") << " n:" << PVideoFrameCount << - " coded_n:" << frame->coded_picture_number << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* copy decoded frame to destination buffer: - * this is required since rawvideo expects non aligned data */ - - sws_scale(PswsCtx, frame->data, frame->linesize, 0, height, frameRGB->data, frameRGB->linesize); - - /* write to rawvideo file */ - if (widthDst > 0 && heightDst > 0) - OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, widthDst, heightDst); - else - OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, width, height); - PyList_Append(PyFrameList, OneFrame); - processed = true; - } - } - - /* If we use frame reference counting, we own the data and need - * to de-reference it when we don't use it anymore */ - - if (got_frame && refcount) - av_frame_unref(frame); - - return decoded; -} - -int cmpc::CMpegDecoder::_SaveFrameForGOP(PyObject* PyFrameList, AVFrame*& frame, AVFrame*& frameRGB, AVPacket*& pkt, bool& got_frame, int& GOPstate, bool& processed, int cached) { - int ret = 0; - int decoded = pkt->size; - PyObject* OneFrame = nullptr; - - got_frame = false; - - if (pkt->stream_index == PVideoStreamIDX) { - /* decode video frame */ - ret = __avcodec_decode_video2(PCodecCtx, frame, got_frame, pkt); - if (ret < 0) { - cout << "Error decoding video frame (" << av_err2str(ret) << ")" << endl; - return ret; - } - - if (got_frame) { - - currentGOPTSM = frame->pts + 1; - - switch (GOPstate) { - case 0: - if (frame->key_frame) { - GOPstate = 1; - } - else { - processed = false; - return decoded; - } - break; - case 1: - if (frame->key_frame) { - GOPstate = 2; - processed = false; - return decoded; - } - break; - default: - break; - } - - if (frame->width != width || frame->height != height || - frame->format != PPixelFormat) { - /* To handle this change, one could call av_image_alloc again and - * decode the following frames into another rawvideo file. 
*/ - cout << "Error: Width, height and pixel format have to be " - "constant in a rawvideo file, but the width, height or " - "pixel format of the input video changed:\n" - "old: width = " << width << ", height = " << height << ", format = " - << av_get_pix_fmt_name(PPixelFormat) << endl << - "new: width = " << frame->width << ", height = " << frame->height << ", format = " - << av_get_pix_fmt_name(static_cast(frame->format)) << endl; - return -1; - } - - PVideoFrameCount++; - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "video_frame" << (cached ? "(cached)" : "") << " n:" << PVideoFrameCount << - " coded_n:" << frame->coded_picture_number << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* copy decoded frame to destination buffer: - * this is required since rawvideo expects non aligned data */ - /*av_image_copy(video_dst_data, video_dst_linesize, - (const uint8_t **)frame->data, frame->linesize, - PPixelFormat, width, height);*/ - - sws_scale(PswsCtx, frame->data, frame->linesize, 0, height, frameRGB->data, frameRGB->linesize); - - /* write to rawvideo file */ - if (widthDst > 0 && heightDst > 0) - OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, widthDst, heightDst); - else - OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, width, height); - PyList_Append(PyFrameList, OneFrame); - //cout << "[" << width << "-" << height << ", " << width*height << ", " << video_dst_bufsize << "]" << endl; - //cout << "PTS = " << frameRGB->pts << ", coded Fnum = " << frameRGB->coded_picture_number << endl; - processed = true; - } - } - - /* If we use frame reference counting, we own the data and need - * to de-reference it when we don't use it anymore */ - - if (got_frame && refcount) - av_frame_unref(frame); - - return decoded; -} - -PyObject* cmpc::CMpegDecoder::_SaveFrame_castToPyFrameArray(uint8_t* data[], int fWidth, int fHeight) { - npy_intp dims[] = { fHeight, fWidth, 3 }; 
- auto newdata = new uint8_t[static_cast(fHeight) * static_cast(fWidth) * 3]; - memcpy(newdata, data[0], static_cast(fHeight) * static_cast(fWidth) * 3); - PyObject* PyFrame = PyArray_SimpleNewFromData(3, dims, NPY_UINT8, reinterpret_cast(newdata)); - PyArray_ENABLEFLAGS((PyArrayObject*)PyFrame, NPY_ARRAY_OWNDATA); - return PyFrame; -} - -attribute_deprecated -PyObject* cmpc::CMpegDecoder::_SaveFrame_castToPyFrameArrayOld(uint8_t* data[], int fWidth, int fHeight) { - npy_intp dims[] = { static_cast(fHeight) * static_cast(fWidth) * 3 }; - PyObject* PyFrame = PyArray_SimpleNew(1, dims, NPY_UINT8); - if (PyFrame == NULL) { - Py_RETURN_NONE; - } - auto out_iter = NpyIter_New((PyArrayObject*)PyFrame, NPY_ITER_READWRITE, - NPY_CORDER, NPY_NO_CASTING, NULL); - if (out_iter == NULL) { - Py_DECREF(PyFrame); - Py_RETURN_NONE; - } - /* - * The iternext function gets stored in a local variable - * so it can be called repeatedly in an efficient manner. - */ - auto iternext = NpyIter_GetIterNext(out_iter, NULL); - if (iternext == NULL) { - NpyIter_Deallocate(out_iter); - Py_DECREF(PyFrame); - Py_RETURN_NONE; - } - /* The location of the data pointer which the iterator may update */ - auto dataptr = NpyIter_GetDataPtrArray(out_iter); - //auto out_iter = (PyArrayIterObject *)PyArray_IterNew(PyFrame); - uint8_t* pdata = data[0]; - for (auto i = 0; i < fHeight; i++) { - for (auto j = 0; j < fWidth; j++) { - for (auto k = 0; k < 3; k++, pdata++) { - uint8_t* out_dataptr = (uint8_t*)(*dataptr); - *out_dataptr = *pdata; - iternext(out_iter); - } - } - } - PyObject* pyshape = Py_BuildValue("[iii]", fHeight, fWidth, 3); - PyFrame = PyArray_Reshape((PyArrayObject*)PyFrame, pyshape); - Py_DECREF(pyshape); - NpyIter_Deallocate(out_iter); - PyGC_Collect(); - //Py_INCREF(PyFrame); - return PyFrame; -} - -int64_t cmpc::CMpegDecoder::__FrameToPts(int64_t seekFrame) const { - auto time_base = PVideoStream->time_base; - auto frame_base = PVideoStream->avg_frame_rate; - //cout << "Frame_Base: 
den=" << frame_base.den << ", num=" << frame_base.num << endl; - auto seekTimeStamp = PVideoStream->start_time + av_rescale(av_rescale(seekFrame, time_base.den, time_base.num), frame_base.den, frame_base.num); - return seekTimeStamp; -} - -int64_t cmpc::CMpegDecoder::__TimeToPts(double seekTime) const { - auto time_base = PVideoStream->time_base; - auto seekTimeStamp = PVideoStream->start_time + av_rescale(static_cast(seekTime * 1000), time_base.den, time_base.num) / 1000; - return seekTimeStamp; -} - -bool cmpc::CMpegDecoder::ExtractGOP(PyObject* PyFrameList) { - int ret; - bool got_frame; - - if (EndofGOP) - return false; - - AVFrame* frame = av_frame_alloc(); - auto pkt = av_packet_alloc(); - if (!frame) { - cerr << "Could not allocate frame" << endl; - ret = AVERROR(ENOMEM); - return false; - } - AVFrame* frameRGB = av_frame_alloc(); - if (!frameRGB) { - cerr << "Could not allocate frameRGB" << endl; - return false; - } - /* initialize packet, set data to NULL, let the demuxer fill it */ - if (PVideoStream && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "Demuxing video from file '" << videoPath << "' into Python-List" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* Reset the contex to remove the flushed state. */ - avcodec_flush_buffers(PCodecCtx); - - /* read frames from the file */ - bool frameProcessed = false; - PVideoFrameCount = 0; - - //cout << framePos_TimeBase << endl; - if (av_seek_frame(PFormatCtx, PVideoStreamIDX, currentGOPTSM, AVSEEK_FLAG_BACKWARD) < 0) { - cerr << "AV seek frame fail!" 
<< endl; - av_seek_frame(PFormatCtx, -1, 0, AVSEEK_FLAG_BACKWARD); - } - - // Assign appropriate parts of buffer to image planes in pFrameRGB Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture - if (widthDst > 0 && heightDst > 0) { - av_image_fill_arrays(frameRGB->data, frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, widthDst, heightDst, 1); - } - else { - av_image_fill_arrays(frameRGB->data, frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, width, height, 1); - } - - int GOPstate = 0; // 0: Have not meed key frame; 1: During GOP; 2: End of GOP - int count = 0; - - auto temp_pkt = av_packet_alloc(); - while (av_read_frame(PFormatCtx, pkt) >= 0) { - //cout << "[Test - " << pkt.size << " ]" << endl; - av_packet_ref(temp_pkt, pkt); - frameProcessed = false; - do { - ret = _SaveFrameForGOP(PyFrameList, frame, frameRGB, temp_pkt, got_frame, GOPstate, frameProcessed, 0); - if (ret < 0) - break; - temp_pkt->data += ret; - temp_pkt->size -= ret; - } while (temp_pkt->size > 0); - /* flush cached frames */ - av_packet_unref(temp_pkt); - av_packet_unref(pkt); - if (frameProcessed) - count++; - if (GOPstate == 2) - break; - } - av_packet_free(&temp_pkt); - - if (GOPstate == 1) { //If the end of reading is not raised by I frame, it indicates that the video reaches the end. - EndofGOP = true; - } - - do { - _SaveFrameForGOP(PyFrameList, frame, frameRGB, pkt, got_frame, GOPstate, frameProcessed, 1); - } while (got_frame); - - //cout << "Demuxing succeeded." << endl; - - if (PVideoStream && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "Succeed in convert GOP into Python_List, got " << count << " frames." 
<< endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - //av_free(RGBbuffer); - //RGBbuffer = nullptr; - //cout << "Free Buffer" << endl; - //sws_freeContext(PswsCtx); - //cout << "Free ctx" << endl; - //PswsCtx = nullptr; - if (frameRGB) { - av_frame_free(&frameRGB); - } - if (frame) { - av_frame_free(&frame); - } - if (pkt) { - av_packet_free(&pkt); - } - - //cout << "End Process" << endl; - - return true; -} - -bool cmpc::CMpegDecoder::ExtractFrame(PyObject* PyFrameList, int64_t framePos, int64_t frameNum, double timePos, int mode) { - int ret; - bool got_frame; - auto frame = av_frame_alloc(); - if (!frame) { - cerr << "Could not allocate frame" << endl; - ret = AVERROR(ENOMEM); - return false; - } - auto pkt = av_packet_alloc(); - if (!pkt) { - cerr << "Could not allocate packet" << endl; - ret = AVERROR(ENOMEM); - return false; - } - auto frameRGB = av_frame_alloc(); - if (!frameRGB) { - cerr << "Could not allocate frameRGB" << endl; - return false; - } - /* initialize packet, set data to NULL, let the demuxer fill it */ - if (PVideoStream && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "Demuxing video from file '" << videoPath << "' into Python-List" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* Reset the contex to remove the flushed state. */ - avcodec_flush_buffers(PCodecCtx); - - /* read frames from the file */ - int64_t count = 0; - bool frameProcessed = false; - PVideoFrameCount = 0; - - int64_t framePos_TimeBase; - if (mode && 0x1) { - framePos_TimeBase = __TimeToPts(timePos); - } - else { - framePos_TimeBase = __FrameToPts(framePos); - } - if (av_seek_frame(PFormatCtx, PVideoStreamIDX, framePos_TimeBase, AVSEEK_FLAG_BACKWARD) < 0) { - cerr << "AV seek frame fail!" 
<< endl; - av_seek_frame(PFormatCtx, -1, 0, AVSEEK_FLAG_BACKWARD); - } - - // Assign appropriate parts of buffer to image planes in pFrameRGB Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture - if (widthDst > 0 && heightDst > 0) { - av_image_fill_arrays(frameRGB->data, frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, widthDst, heightDst, 1); - } - else { - av_image_fill_arrays(frameRGB->data, frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, width, height, 1); - } - - auto temp_pkt = av_packet_alloc(); - while (av_read_frame(PFormatCtx, pkt) >= 0) { - av_packet_ref(temp_pkt, pkt); - frameProcessed = false; - do { - ret = _SaveFrame(PyFrameList, frame, frameRGB, temp_pkt, got_frame, framePos_TimeBase, frameProcessed, 0); - if (ret < 0) - break; - temp_pkt->data += ret; - temp_pkt->size -= ret; - } while (temp_pkt->size > 0); - /* flush cached frames */ - av_packet_unref(temp_pkt); - av_packet_unref(pkt); - if (frameProcessed) - count++; - if (count >= frameNum) - break; - } - av_packet_free(&temp_pkt); - - do { - _SaveFrame(PyFrameList, frame, frameRGB, pkt, got_frame, framePos_TimeBase, frameProcessed, 1); - } while (got_frame); - - if (PVideoStream && count > 0 && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "Succeed in convert frames into Python_List" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - if (frameRGB) { - av_frame_free(&frameRGB); - } - if (frame) { - av_frame_free(&frame); - } - if (pkt) { - av_packet_free(&pkt); - } - - return true; -} - -ostream& cmpc::operator<<(ostream& out, cmpc::CMpegDecoder& self_class) { - out << std::setw(1) << "/"; - out << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setw(1) << " * Packed FFmpeg Decoder - Y. 
Jin V" << MPEGCODER_CURRENT_VERSION << endl; - out << " " << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * VideoPath: " \ - << self_class.videoPath << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (Width, Height): " \ - << self_class.width << ", " << self_class.height << endl; - if (self_class.widthDst > 0 && self_class.heightDst > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (WidthDst, HeightDst): " \ - << self_class.widthDst << ", " << self_class.heightDst << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Deccoder: " \ - << self_class._str_codec << endl; - if (self_class.PCodecCtx) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number: " \ - << self_class.PCodecCtx->thread_count << endl; - } - else { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number (P): " \ - << self_class.nthread << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Duration: " \ - << self_class._duration << " [s]" << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Predicted FrameNum: " \ - << self_class._predictFrameNum << endl; - out << std::setw(1) << " */"; - return out; -} - - -/** - * Related with the encoder. - */ - - // Constructors following 3-5 law. 
-cmpc::CMpegEncoder::CMpegEncoder(void) : - videoPath(), codecName(), bitRate(1024), width(100), height(100), widthSrc(0), heightSrc(0), - timeBase(_setAVRational(1, 25)), frameRate(_setAVRational(25, 1)), GOPSize(10), MaxBFrame(1), - PStreamContex({ 0 }), PFormatCtx(nullptr), Ppacket(nullptr), PswsCtx(nullptr), - __frameRGB(nullptr), RGBbuffer(nullptr), __have_video(false), __enable_header(false), - nthread(0) { - videoPath.clear(); - codecName.clear(); -} - -void cmpc::CMpegEncoder::clear(void) { - FFmpegClose(); - videoPath.clear(); - codecName.clear(); - bitRate = 1024; - width = 100; - height = 100; - heightSrc = 0; - widthSrc = 0; - timeBase = _setAVRational(1, 25); - frameRate = _setAVRational(25, 1); - GOPSize = 10; - MaxBFrame = 1; - nthread = 0; - PStreamContex = { 0 }; - __have_video = false; - __enable_header = false; -} - -cmpc::CMpegEncoder::~CMpegEncoder(void) { - clear(); -} - -cmpc::CMpegEncoder::CMpegEncoder(const CMpegEncoder& ref) : - videoPath(ref.videoPath), codecName(ref.codecName), bitRate(ref.bitRate), - width(ref.width), height(ref.height), widthSrc(ref.widthSrc), heightSrc(ref.heightSrc), - timeBase(ref.timeBase), frameRate(ref.frameRate), GOPSize(ref.GOPSize), MaxBFrame(ref.MaxBFrame), - PStreamContex({ 0 }), PFormatCtx(nullptr), Ppacket(nullptr), PswsCtx(nullptr), - __frameRGB(nullptr), RGBbuffer(nullptr), __have_video(false), __enable_header(false), - nthread(ref.nthread) { - if (!FFmpegSetup()) { - clear(); - } -} - -cmpc::CMpegEncoder& cmpc::CMpegEncoder::operator=(const CMpegEncoder& ref) { - if (this != &ref) { - videoPath.assign(ref.videoPath); - codecName.assign(ref.codecName); - bitRate = ref.bitRate; - width = ref.width; - height = ref.height; - widthSrc = ref.widthSrc; - heightSrc = ref.heightSrc; - timeBase = ref.timeBase; - frameRate = ref.frameRate; - GOPSize = ref.GOPSize; - MaxBFrame = ref.MaxBFrame; - PStreamContex = { 0 }; - PFormatCtx = nullptr; - Ppacket = nullptr; - PswsCtx = nullptr; - __frameRGB = nullptr; - 
RGBbuffer = nullptr; - __have_video = false; - __enable_header = false; - nthread = ref.nthread; - if (!FFmpegSetup()) { - clear(); - } - } - return *this; -} - -cmpc::CMpegEncoder::CMpegEncoder(CMpegEncoder&& ref) noexcept : - videoPath(std::move(ref.videoPath)), codecName(std::move(ref.codecName)), bitRate(ref.bitRate), - width(ref.width), height(ref.height), widthSrc(ref.widthSrc), heightSrc(ref.heightSrc), - timeBase(ref.timeBase), frameRate(ref.frameRate), GOPSize(ref.GOPSize), MaxBFrame(ref.MaxBFrame), - PStreamContex(std::move(ref.PStreamContex)), PFormatCtx(ref.PFormatCtx), Ppacket(ref.Ppacket), - PswsCtx(ref.PswsCtx), __frameRGB(ref.__frameRGB), RGBbuffer(ref.RGBbuffer), - __have_video(ref.__have_video), __enable_header(ref.__enable_header), nthread(ref.nthread) { - ref.PFormatCtx = nullptr; - ref.PStreamContex = { 0 }; - ref.PswsCtx = nullptr; - ref.RGBbuffer = nullptr; - ref.Ppacket = nullptr; - ref.__frameRGB = nullptr; -} - -cmpc::CMpegEncoder& cmpc::CMpegEncoder::operator=(CMpegEncoder&& ref) noexcept { - if (this != &ref) { - videoPath.assign(std::move(ref.videoPath)); - codecName.assign(std::move(ref.codecName)); - bitRate = ref.bitRate; - width = ref.width; - height = ref.height; - widthSrc = ref.widthSrc; - heightSrc = ref.heightSrc; - timeBase = ref.timeBase; - frameRate = ref.frameRate; - GOPSize = ref.GOPSize; - nthread = ref.nthread; - MaxBFrame = ref.MaxBFrame; - PFormatCtx = ref.PFormatCtx; - PStreamContex = std::move(ref.PStreamContex); - PswsCtx = ref.PswsCtx; - RGBbuffer = ref.RGBbuffer; - Ppacket = ref.Ppacket; - __frameRGB = ref.__frameRGB; - __have_video = ref.__have_video; - __enable_header = ref.__enable_header; - ref.PFormatCtx = nullptr; - ref.PStreamContex = { 0 }; - ref.PswsCtx = nullptr; - ref.RGBbuffer = nullptr; - ref.Ppacket = nullptr; - ref.__frameRGB = nullptr; - } - return *this; -} - -void cmpc::CMpegEncoder::resetPath(string inVideoPath) { - videoPath.assign(inVideoPath); -} - -bool cmpc::CMpegEncoder::FFmpegSetup(string 
inVideoPath) { - resetPath(inVideoPath); - return FFmpegSetup(); -} - -cmpc::AVRational cmpc::CMpegEncoder::_setAVRational(int num, int den) { - AVRational res; - res.num = num; res.den = den; - return res; -} - -int64_t cmpc::CMpegEncoder::__FrameToPts(int64_t seekFrame) const { - return av_rescale(av_rescale(seekFrame, timeBase.den, timeBase.num), frameRate.den, frameRate.num); -} - -int64_t cmpc::CMpegEncoder::__TimeToPts(double seekTime) const { - return av_rescale(static_cast(seekTime * 1000), timeBase.den, timeBase.num) / 1000; -} - -void cmpc::CMpegEncoder::__log_packet() { - AVRational* time_base = &PFormatCtx->streams[Ppacket->stream_index]->time_base; - std::ostringstream str_data; - str_data << "pts:" << av_ts2str(Ppacket->pts) << " pts_time:" << av_ts2timestr(Ppacket->pts, time_base) - << " dts:" << av_ts2str(Ppacket->dts) << " dts_time:" << av_ts2timestr(Ppacket->dts, time_base) << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); -} - -int cmpc::CMpegEncoder::__write_frame() { - /* rescale output packet timestamp values from codec to stream timebase */ - av_packet_rescale_ts(Ppacket, PStreamContex.enc->time_base, PStreamContex.st->time_base); - Ppacket->stream_index = PStreamContex.st->index; - - /* Write the compressed frame to the media file. */ - if (__dumpControl > 0) - __log_packet(); - return av_interleaved_write_frame(PFormatCtx, Ppacket); -} - -/* Add an output stream. */ -const cmpc::AVCodec* cmpc::CMpegEncoder::__add_stream() { - /* find the encoder */ - AVCodecID codec_id; - auto srcwidth = widthSrc > 0 ? widthSrc : width; - auto srcheight = heightSrc > 0 ? heightSrc : height; - auto const_codec = avcodec_find_encoder_by_name(codecName.c_str()); - const AVCodec* codec; - if (!(const_codec)) { - codec_id = PFormatCtx->oformat->video_codec; - cerr << "Could not find encoder " << codecName << ", use " << avcodec_get_name(codec_id) << " as an alternative." 
<< endl; - codec = avcodec_find_encoder(codec_id); - } - else { - codec = const_codec; - codec_id = codec->id; - } - - if (!codec) { - cerr << "Could not find encoder for '" << avcodec_get_name(codec_id) << "'" << endl; - return nullptr; - } - - PStreamContex.st = avformat_new_stream(PFormatCtx, nullptr); - if (!PStreamContex.st) { - cerr << "Could not allocate stream" << endl; - return nullptr; - } - PStreamContex.st->id = PFormatCtx->nb_streams - 1; - auto c = avcodec_alloc_context3(codec); - if (!c) { - cerr << "Could not alloc an encoding context" << endl; - return nullptr; - } - if (nthread > 0) { - c->thread_count = nthread; - } - PStreamContex.enc = c; - - switch (codec->type) { - case AVMediaType::AVMEDIA_TYPE_VIDEO: - c->codec_id = codec_id; - - c->bit_rate = bitRate; - /* Resolution must be a multiple of two. */ - c->width = width; - c->height = height; - /* timebase: This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. For fixed-fps content, - * timebase should be 1/framerate and timestamp increments should be - * identical to 1. */ - PStreamContex.st->time_base.den = 0; - PStreamContex.st->time_base.num = 0; - //av_stream_set_r_frame_rate(PStreamContex.st, frameRate); - //cout << "(" << frameRate.num << ", " << frameRate.den << ")" << endl; - //PStreamContex.st->r_frame_rate - c->time_base = timeBase; - - //PStreamContex.st->frame - c->framerate = frameRate; - - c->gop_size = GOPSize; /* emit one intra frame every twelve frames at most */ - c->max_b_frames = MaxBFrame; - c->pix_fmt = STREAM_PIX_FMT; - if (c->codec_id == AVCodecID::AV_CODEC_ID_FLV1) { - /* just for testing, we also add B-frames */ - c->max_b_frames = 0; - } - if (c->codec_id == AVCodecID::AV_CODEC_ID_MPEG2VIDEO) { - /* just for testing, we also add B-frames */ - c->max_b_frames = 2; - } - if (c->codec_id == AVCodecID::AV_CODEC_ID_MPEG1VIDEO) { - /* Needed to avoid using macroblocks in which some coeffs overflow. 
- * This does not happen with normal video, it just happens here as - * the motion of the chroma plane does not match the luma plane. */ - c->mb_decision = 2; - } - if (c->pix_fmt != STREAM_PIX_FMT) { - /* as we only generate a YUV420P picture, we must convert it - * to the codec pixel format if needed */ - if (!PStreamContex.sws_ctx) { - PStreamContex.sws_ctx = sws_getContext(c->width, c->height, - STREAM_PIX_FMT, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PStreamContex.sws_ctx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - } - if (!PswsCtx) { - PswsCtx = sws_getContext(srcwidth, srcheight, - AVPixelFormat::AV_PIX_FMT_RGB24, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PswsCtx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - if (!RGBbuffer) { - auto numBytes = av_image_get_buffer_size(AVPixelFormat::AV_PIX_FMT_RGB24, srcwidth, srcheight, 1); - RGBbuffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); - } - break; - - default: - break; - } - - /* Some formats want stream headers to be separate. */ - if (PFormatCtx->oformat->flags & AVFMT_GLOBALHEADER) - c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - return codec; -} - -/* video output */ -cmpc::AVFrame* cmpc::CMpegEncoder::__alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) { - auto picture = av_frame_alloc(); - if (!picture) - return nullptr; - picture->format = pix_fmt; - picture->width = width; - picture->height = height; - /* allocate the buffers for the frame data */ - auto ret = av_frame_get_buffer(picture, 32); - if (ret < 0) { - cerr << "Could not allocate frame data." 
<< endl; - return nullptr; - } - return picture; -} - -bool cmpc::CMpegEncoder::__open_video(const AVCodec* codec, const AVDictionary* opt_arg) { - int ret; - auto c = PStreamContex.enc; - AVDictionary* opt = nullptr; - - av_dict_copy(&opt, opt_arg, 0); - /* open the codec */ - ret = avcodec_open2(c, codec, &opt); - av_dict_free(&opt); - if (ret < 0) { - cerr << "Could not open video codec: " << av_err2str(ret) << endl; - return false; - } - /* allocate and init a re-usable frame */ - PStreamContex.frame = __alloc_picture(c->pix_fmt, c->width, c->height); - if (!PStreamContex.frame) { - cerr << "Could not allocate video frame" << endl; - return false; - } - /* If the output format is not YUV420P, then a temporary YUV420P - * picture is needed too. It is then converted to the required - * output format. */ - PStreamContex.tmp_frame = nullptr; - if (c->pix_fmt != STREAM_PIX_FMT) { - PStreamContex.tmp_frame = __alloc_picture(STREAM_PIX_FMT, c->width, c->height); - if (!PStreamContex.tmp_frame) { - cerr << "Could not allocate temporary picture" << endl; - return false; - } - } - /* copy the stream parameters to the muxer */ - ret = avcodec_parameters_from_context(PStreamContex.st->codecpar, c); - if (ret < 0) { - cerr << "Could not copy the stream parameters" << endl; - return false; - } - return true; -} - -cmpc::AVFrame* cmpc::CMpegEncoder::__get_video_frame(PyArrayObject* PyFrame) { - auto c = PStreamContex.enc; - - /* check if we want to generate more frames */ - //if (av_compare_ts(PStreamContex.next_pts, c->time_base, STREAM_DURATION, { 1, 1 }) >= 0) - // return nullptr; - /* when we pass a frame to the encoder, it may keep a reference to it - * internally; make sure we do not overwrite it here */ - if (av_frame_make_writable(PStreamContex.frame) < 0) - return nullptr; - if (c->pix_fmt != STREAM_PIX_FMT) { - /* as we only generate a YUV420P picture, we must convert it - * to the codec pixel format if needed */ - if (!PStreamContex.sws_ctx) { - 
PStreamContex.sws_ctx = sws_getContext(c->width, c->height, - STREAM_PIX_FMT, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PStreamContex.sws_ctx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - if (!_LoadFrame_castFromPyFrameArray(PStreamContex.tmp_frame, PyFrame)) { - return nullptr; - } - sws_scale(PStreamContex.sws_ctx, - (const uint8_t* const*)PStreamContex.tmp_frame->data, PStreamContex.tmp_frame->linesize, - 0, c->height, PStreamContex.frame->data, PStreamContex.frame->linesize); - } - else { - if (!_LoadFrame_castFromPyFrameArray(PStreamContex.frame, PyFrame)) { - return nullptr; - } - } - - PStreamContex.frame->pts = PStreamContex.next_frame; - PStreamContex.next_frame++; - return PStreamContex.frame; -} - -bool cmpc::CMpegEncoder::_LoadFrame_castFromPyFrameArray(AVFrame* frame, PyArrayObject* PyFrame) { - /* make sure the frame data is writable */ - if (!__frameRGB) { - cerr << "Could not allocate frameRGB" << endl; - return false; - } - auto out_dataptr = reinterpret_cast(PyArray_DATA(PyFrame)); - auto srcwidth = widthSrc > 0 ? widthSrc : width; - auto srcheight = heightSrc > 0 ? 
heightSrc : height; - memcpy(RGBbuffer, out_dataptr, static_cast(srcwidth) * static_cast(srcheight) * 3 * sizeof(uint8_t)); - // Assign appropriate parts of buffer to image planes in pFrameRGB Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture - av_image_fill_arrays(__frameRGB->data, __frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, srcwidth, srcheight, 1); - sws_scale(PswsCtx, __frameRGB->data, __frameRGB->linesize, 0, srcheight, frame->data, frame->linesize); - //cout << "Free 1" << endl; - //delete frameRGB; - //cout << "Free 2" << endl; - return true; -} - -/* -* encode one video frame and send it to the muxer -* return 1 when encoding is finished, 0 otherwise -*/ -int cmpc::CMpegEncoder::__avcodec_encode_video2(AVCodecContext* enc_ctx, AVPacket* pkt, AVFrame* frame) { - int ret; - int wfret = 0; - - if (frame) { - if (__dumpControl > 1) { - std::ostringstream str_data; - str_data << "Send frame " << frame->pts << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_DEBUG, "%s", str_data_s.c_str()); - } - } - else { - return AVERROR(EAGAIN); - } - - ret = avcodec_send_frame(enc_ctx, frame); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - return ret == AVERROR_EOF ? 
0 : ret; - } - - ret = avcodec_receive_packet(enc_ctx, pkt); - if (ret == AVERROR(EAGAIN)) - return 0; - - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "Write packet " << pkt->pts << " (size=" << pkt->size << "), "; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - if (!ret) { - wfret = __write_frame(); - av_packet_unref(Ppacket); - if (wfret < 0) { - cerr << "Error while writing video frame: " << av_err2str(ret) << endl; - return wfret; - } - } - return ret; -} - -int cmpc::CMpegEncoder::__avcodec_encode_video2_flush(AVCodecContext* enc_ctx, AVPacket* pkt) { - int ret; - int wfret = 0; - if (__dumpControl > 1) { - std::ostringstream str_data; - str_data << "Flush all packets" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_DEBUG, "%s", str_data_s.c_str()); - } - - ret = avcodec_send_frame(enc_ctx, nullptr); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - return ret == AVERROR_EOF ? 
0 : ret; - } - - while (ret >= 0) { - ret = avcodec_receive_packet(enc_ctx, pkt); - if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) { - return 0; - } - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "Write packet " << pkt->pts << " (size=" << pkt->size << "), "; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - if (!ret) { - wfret = __write_frame(); - av_packet_unref(pkt); - } - else { - wfret = 0; - } - if (wfret < 0) { - cerr << "Error while writing video frame: " << av_err2str(ret) << endl; - return wfret; - } - } - return ret; -} - -int cmpc::CMpegEncoder::EncodeFrame(PyArrayObject* PyFrame) { - int ret; - auto c = PStreamContex.enc; - AVFrame* frame = nullptr; - if ((!__have_video) || (!__enable_header)) - cerr << "Not allowed to use this method before FFmpegSetup()" << endl; - if (PyFrame) { - frame = __get_video_frame(PyFrame); - ret = __avcodec_encode_video2(c, Ppacket, frame); - } - else { - frame = nullptr; - ret = __avcodec_encode_video2_flush(c, Ppacket); - } - - if (ret < 0) { - cerr << "Error encoding video frame: " << av_err2str(ret) << endl; - return ret; - } - return frame ? 
0 : 1; -} - -void cmpc::CMpegEncoder::setParameter(string keyword, void* ptr) { - if (keyword.compare("decoder") == 0) { - CMpegDecoder* ref = reinterpret_cast(ptr); - resetPath(ref->videoPath); - codecName.assign(ref->_str_codec); - if (ref->PCodecCtx) { - bitRate = ref->PCodecCtx->bit_rate; - GOPSize = ref->PCodecCtx->gop_size; - MaxBFrame = ref->PCodecCtx->max_b_frames; - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->PCodecCtx->thread_count; - } - nthread = ref->PCodecCtx->thread_count; - } - else { - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->nthread; - } - nthread = ref->nthread; - } - if (ref->widthDst > 0 && ref->heightDst > 0) { - width = ref->widthDst; - height = ref->heightDst; - } - else { - width = ref->width; - height = ref->height; - } - widthSrc = width; - heightSrc = height; - if (ref->PVideoStream) { - //timeBase = ref->PVideoStream->time_base; - frameRate = ref->PVideoStream->avg_frame_rate; - timeBase = _setAVRational(frameRate.den, frameRate.num); - } - } - else if (keyword.compare("client") == 0) { - CMpegClient* ref = reinterpret_cast(ptr); - resetPath(ref->videoPath); - codecName.assign(ref->_str_codec); - if (ref->PCodecCtx) { - bitRate = ref->PCodecCtx->bit_rate; - GOPSize = ref->PCodecCtx->gop_size; - MaxBFrame = ref->PCodecCtx->max_b_frames; - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->PCodecCtx->thread_count; - } - nthread = ref->PCodecCtx->thread_count; - } - else { - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->nthread; - } - nthread = ref->nthread; - } - if (ref->widthDst > 0 && ref->heightDst > 0) { - width = ref->widthDst; - height = ref->heightDst; - } - else { - width = ref->width; - height = ref->height; - } - widthSrc = width; - heightSrc = height; - if (ref->PVideoStream) { - //timeBase = ref->PVideoStream->time_base; - frameRate = ref->PVideoStream->avg_frame_rate; - timeBase = _setAVRational(frameRate.den, frameRate.num); - } - } - else if 
(keyword.compare("configDict") == 0) { - PyObject* ref = reinterpret_cast(ptr); - if (PyDict_Check(ref)) { - string key; - PyObject* val; - // Set parameters. - key.assign("videoPath"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - resetPath(val_str); - } - } - else { - key.assign("videoAddress"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - resetPath(val_str); - } - } - } - key.assign("codecName"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - codecName.assign(val_str); - } - } - key.assign("bitRate"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLongLong(val)); - bitRate = val_num; - } - } - key.assign("GOPSize"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - GOPSize = val_num; - } - } - key.assign("maxBframe"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - MaxBFrame = val_num; - } - } - key.assign("width"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - width = val_num; - widthSrc = val_num; - } - } - key.assign("height"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - height = val_num; - heightSrc = val_num; - } - } - key.assign("widthSrc"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_1 = static_cast(PyLong_AsLong(val)); - key.assign("heightSrc"); - val = PyDict_GetItemString(ref, 
key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_2 = static_cast(PyLong_AsLong(val)); - widthSrc = val_num_1; - heightSrc = val_num_2; - } - } - } - } - key.assign("widthDst"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_1 = static_cast(PyLong_AsLong(val)); - key.assign("heightDst"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_2 = static_cast(PyLong_AsLong(val)); - width = val_num_1; - height = val_num_2; - } - } - } - } - key.assign("frameRate"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyTuple_Check(val)) { - auto valObj = PyTuple_GetItem(val, 0); - int num = static_cast(PyLong_AsLong(valObj)); - valObj = PyTuple_GetItem(val, 1); - int den = static_cast(PyLong_AsLong(valObj)); - frameRate = _setAVRational(num, den); - timeBase = _setAVRational(den, num); - } - } - key.assign("nthread"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = val_num; - } - nthread = val_num; - } - } - } - } - else if (keyword.compare("videoPath") == 0) { - string* ref = reinterpret_cast(ptr); - resetPath(*ref); - } - else if (keyword.compare("codecName") == 0) { - string* ref = reinterpret_cast(ptr); - codecName.assign(*ref); - } - else if (keyword.compare("bitRate") == 0) { - double* ref = reinterpret_cast(ptr); - auto bit_rate = static_cast((*ref) * 1024); - bitRate = bit_rate; - } - else if (keyword.compare("width") == 0) { - int* ref = reinterpret_cast(ptr); - width = *ref; - } - else if (keyword.compare("height") == 0) { - int* ref = reinterpret_cast(ptr); - height = *ref; - } - else if (keyword.compare("widthSrc") == 0) { - int* ref = reinterpret_cast(ptr); - widthSrc = *ref; - } - else if (keyword.compare("heightSrc") == 0) { - int* ref = reinterpret_cast(ptr); - 
heightSrc = *ref; - } - else if (keyword.compare("GOPSize") == 0) { - int* ref = reinterpret_cast(ptr); - GOPSize = *ref; - } - else if (keyword.compare("maxBframe") == 0) { - int* ref = reinterpret_cast(ptr); - MaxBFrame = *ref; - } - else if (keyword.compare("frameRate") == 0) { - PyObject* ref = reinterpret_cast(ptr); - auto refObj = PyTuple_GetItem(ref, 0); - int num = static_cast(PyLong_AsLong(refObj)); - refObj = PyTuple_GetItem(ref, 1); - int den = static_cast(PyLong_AsLong(refObj)); - frameRate = _setAVRational(num, den); - timeBase = _setAVRational(den, num); - } - else if (keyword.compare("nthread") == 0) { - auto ref = reinterpret_cast(ptr); - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = *ref; - } - nthread = *ref; - } -} - -PyObject* cmpc::CMpegEncoder::getParameter(string keyword) { - if (keyword.compare("videoPath") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(videoPath.c_str(), static_cast(videoPath.size())); - } - else if (keyword.compare("codecName") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(codecName.c_str(), static_cast(codecName.size())); - } - else if (keyword.compare("bitRate") == 0) { - auto bit_rate = static_cast(bitRate) / 1024; - return Py_BuildValue("d", bit_rate); - } - else if (keyword.compare("width") == 0) { - return Py_BuildValue("i", width); - } - else if (keyword.compare("height") == 0) { - return Py_BuildValue("i", height); - } - else if (keyword.compare("widthSrc") == 0) { - return Py_BuildValue("i", widthSrc); - } - else if (keyword.compare("heightSrc") == 0) { - return Py_BuildValue("i", heightSrc); - } - else if (keyword.compare("GOPSize") == 0) { - return Py_BuildValue("i", GOPSize); - } - else if (keyword.compare("maxBframe") == 0) { - return Py_BuildValue("i", MaxBFrame); - } - else if (keyword.compare("frameRate") == 0) { - auto frame_base = frameRate; - double frameRate = static_cast(frame_base.num) / static_cast(frame_base.den); - return Py_BuildValue("d", frameRate); - } - else if 
(keyword.compare("nthread") == 0) { - if (PStreamContex.enc) { - return Py_BuildValue("i", PStreamContex.enc->thread_count); - } - else { - return Py_BuildValue("i", nthread); - } - } - else { - Py_RETURN_NONE; - } -} - -PyObject* cmpc::CMpegEncoder::getParameter() { - auto res = PyDict_New(); - string key; - PyObject* val = nullptr; - // Fill the values. - key.assign("videoPath"); - val = Py_BuildValue("y", videoPath.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("codecName"); - val = Py_BuildValue("y", codecName.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("bitRate"); - val = Py_BuildValue("L", bitRate); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("GOPSize"); - val = Py_BuildValue("i", GOPSize); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("maxBframe"); - val = Py_BuildValue("i", MaxBFrame); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (widthSrc > 0) { - key.assign("widthSrc"); - val = Py_BuildValue("i", widthSrc); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (heightSrc > 0) { - key.assign("heightSrc"); - val = Py_BuildValue("i", heightSrc); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - key.assign("width"); - val = Py_BuildValue("i", width); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("height"); - val = Py_BuildValue("i", height); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("frameRate"); - val = Py_BuildValue("(ii)", frameRate.num, frameRate.den); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PStreamContex.enc) { - key.assign("nthread"); - val = Py_BuildValue("i", PStreamContex.enc->thread_count); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - else { - key.assign("nthread"); - val = Py_BuildValue("i", nthread); - 
PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - return res; -} - -bool cmpc::CMpegEncoder::FFmpegSetup() { - const AVCodec* video_codec; - int ret; - - if (Ppacket) - av_packet_free(&Ppacket); - Ppacket = av_packet_alloc(); - if (!Ppacket) - return false; - - AVDictionary* opt = nullptr; - //av_dict_set(&opt, "vcodec", codecName.c_str(), 0); - //av_dict_set(&opt, "fflags", "", 0); - - /* allocate the output media context */ - //auto getFormat = av_guess_format(codecName.c_str(), nullptr, nullptr); - avformat_alloc_output_context2(&PFormatCtx, nullptr, nullptr, videoPath.c_str()); - PFormatCtx->avoid_negative_ts = AVFMT_AVOID_NEG_TS_AUTO; - if (!PFormatCtx) { - cout << "Could not select the encoder automatically: using MPEG." << endl; - //cout << "Could not deduce output format from file extension: using MPEG." << endl; - avformat_alloc_output_context2(&PFormatCtx, nullptr, "mpeg", videoPath.c_str()); - } - if (!PFormatCtx) - return false; - - auto fmt = PFormatCtx->oformat; - - /* Add the audio and video streams using the default format codecs - * and initialize the codecs. */ - if (fmt->video_codec != AVCodecID::AV_CODEC_ID_NONE) { - video_codec = __add_stream(); - if (!video_codec) { - FFmpegClose(); - return false; - } - else - __have_video = true; - } - else { - video_codec = nullptr; - } - - /* Now that all the parameters are set, we can open the audio and - * video codecs and allocate the necessary encode buffers. 
*/ - if (__have_video) { - if (!__open_video(video_codec, opt)) { - FFmpegClose(); - return false; - } - else - __have_video = true; - } - - if (__dumpControl > 1) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 1); - } - - /* open the output file, if needed */ - if (!(fmt->flags & AVFMT_NOFILE)) { - ret = avio_open2(&PFormatCtx->pb, videoPath.c_str(), AVIO_FLAG_WRITE, nullptr, nullptr); - if (ret < 0) { - cerr << "Could not open '" << videoPath << "': " << av_err2str(ret) << endl; - FFmpegClose(); - return false; - } - } - - if (!(__frameRGB = av_frame_alloc())) { - cerr << "Could Allocate Temp Frame" << endl; - FFmpegClose(); - return false; - } - - /* Write the stream header, if any. */ - ret = avformat_write_header(PFormatCtx, &opt); - if (ret < 0) { - cerr << "Error occurred when opening output file: " << av_err2str(ret) << endl; - FFmpegClose(); - return false; - } - else { - __enable_header = true; - } - return true; -} - -void cmpc::CMpegEncoder::FFmpegClose() { - if (__enable_header && __have_video) { - //cout << "Flush Video" << endl; - int x; - if ((x = EncodeFrame(nullptr)) == 0) { - // cout << "Ret: " << x << endl; - } - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "All frames are flushed from cache, the video would be closed." << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - } - if (PFormatCtx) { - if (__enable_header) { - av_write_trailer(PFormatCtx); - __enable_header = false; - } - /* Close each codec. 
*/ - if (__have_video) { - /* free the stream */ - //avformat_free_context(PFormatCtx); - if (PStreamContex.enc) - avcodec_free_context(&PStreamContex.enc); - if (PStreamContex.frame) - av_frame_free(&PStreamContex.frame); - if (PStreamContex.tmp_frame) - av_frame_free(&PStreamContex.tmp_frame); - if (PStreamContex.sws_ctx) { - sws_freeContext(PStreamContex.sws_ctx); - PStreamContex.sws_ctx = nullptr; - } - if (PswsCtx) { - sws_freeContext(PswsCtx); - PswsCtx = nullptr; - } - if (RGBbuffer) { - av_free(RGBbuffer); - RGBbuffer = nullptr; - } - __have_video = false; - } - auto fmt = PFormatCtx->oformat; - if (!(fmt->flags & AVFMT_NOFILE)) - /* Close the output file. */ - avio_closep(&PFormatCtx->pb); - /* free the stream */ - avformat_free_context(PFormatCtx); - PFormatCtx = nullptr; - } - if (Ppacket) { - av_packet_free(&Ppacket); - Ppacket = nullptr; - } - if (__frameRGB) { - av_frame_free(&__frameRGB); - } -} - -void cmpc::CMpegEncoder::dumpFormat() { - if (PFormatCtx) - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 1); - else - cerr << "Not loaded video format context now. dumpFormat() is not avaliable." << endl; -} - -ostream& cmpc::operator<<(ostream& out, cmpc::CMpegEncoder& self_class) { - out << std::setw(1) << "/"; - out << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setw(1) << " * Packed FFmpeg Encoder - Y. 
Jin V" << MPEGCODER_CURRENT_VERSION << endl; - out << " " << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * VideoPath: " \ - << self_class.videoPath << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (Width, Height): " \ - << self_class.width << ", " << self_class.height << endl; - if (self_class.widthSrc > 0 && self_class.heightSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (WidthSrc, HeightSrc): " \ - << self_class.widthSrc << ", " << self_class.heightSrc << endl; - } - else if (self_class.widthSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * WidthSrc: " \ - << self_class.widthSrc << endl; - } - else if (self_class.heightSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * HeightSrc: " \ - << self_class.heightSrc << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Enccoder: " \ - << self_class.codecName << endl; - if (self_class.PStreamContex.enc) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number: " \ - << self_class.PStreamContex.enc->thread_count << endl; - } - else { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number (P): " \ - << self_class.nthread << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Bit Rate: " \ - << (self_class.bitRate >> 10) << " [Kbit/s]" << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Frame Rate: " \ - << static_cast(self_class.frameRate.num) / static_cast(self_class.frameRate.den) << " [FPS]" << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * GOP Size: " \ - << self_class.GOPSize << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Maxmal Bframe Density: " \ - << self_class.MaxBFrame << " [/GOP]" << endl; - out << std::setw(1) << " */"; - return out; -} diff 
--git a/MpegCoder/MpegCoder.h b/MpegCoder/MpegCoder.h deleted file mode 100644 index 2ddf98b..0000000 --- a/MpegCoder/MpegCoder.h +++ /dev/null @@ -1,137 +0,0 @@ -// 下列 ifdef 块是创建使从 DLL 导出更简单的 -// 宏的标准方法。此 DLL 中的所有文件都是用命令行上定义的 MPEGCODER_EXPORT -// 符号编译的。在使用此 DLL 的 -// 任何其他项目上不应定义此符号。这样,源文件中包含此文件的任何其他项目都会将 -// MPEGCODER_API 函数视为自 DLL 导入,而此 DLL 则将用此宏定义的 -// 符号视为是被导出的。 -#ifndef MPEGCODER_H_INCLUDED -#define MPEGCODER_H_INCLUDED - -#include "MpegBase.h" - -#define MPEGCODER_DEBUG - -// Exported from MpegCoder.dll -namespace cmpc { - - extern int8_t __dumpControl; - class CMpegClient; - class CMpegServer; - - class CMpegDecoder { - public: - CMpegDecoder(void); // Constructor. - ~CMpegDecoder(void); // 3-5 law. Destructor. - CMpegDecoder(const CMpegDecoder& ref); // Copy constructor. - CMpegDecoder& operator=(const CMpegDecoder& ref); // Copy assignment operator. - CMpegDecoder(CMpegDecoder&& ref) noexcept; // Move constructor. - CMpegDecoder& operator=(CMpegDecoder&& ref) noexcept; // Move assignment operator. - friend class CMpegEncoder; // Let the encoder be able to access the member of this class. - friend class CMpegServer; // Let the server be able to access the member of this class. - friend ostream& operator<<(ostream& out, CMpegDecoder& self_class); // Show the results. - void clear(void); // Clear all configurations and resources. - void meta_protected_clear(void); // Clear the resources, but the configurations are remained. - void dumpFormat(); // Show the av_format results. - void setParameter(string keyword, void* ptr); // Set arguments. - PyObject* getParameter(string keyword); // Get the current arguments. - PyObject* getParameter(); // Get all key arguments. - void resetPath(string inVideoPath); // Reset the path (encoded) of the online video stream. - bool FFmpegSetup(); // Configure the decoder, and extract the basic meta-data. This method is also equipped in the constructor. 
- bool FFmpegSetup(string inVideoPath); // Configure the decoder with extra arguments. - bool ExtractFrame(PyObject* PyFrameList, int64_t framePos, int64_t frameNum, double timePos, int mode); // Extract n frames as PyFrame, where n is given by frameNum, and the starting postion is given by framePos. - bool ExtractGOP(PyObject* PyFrameList); // Extract a GOP as PyFrames. - void setGOPPosition(int64_t inpos); // Set the current GOP poistion by the index of frames. - void setGOPPosition(double inpos); // Set the cuurent GOP position by the time. - private: - string videoPath; // The path of video stream to be decoded. - int width, height; // Width, height of the video. - int widthDst, heightDst; // Target width, height of ExtractFrame(). - enum AVPixelFormat PPixelFormat; // Enum object of the pixel format. - AVFormatContext* PFormatCtx; // Format context of the video. - AVCodecContext* PCodecCtx; // Codec context of the video. - AVStream* PVideoStream; // Video stream. - - int PVideoStreamIDX; // The index of the video stream. - int PVideoFrameCount; // The counter of the decoded frames. - uint8_t* RGBbuffer; // The buffer of the RGB formatted images. - struct SwsContext* PswsCtx; // The context of the scale transformator. - - string _str_codec; // Show the name of the current codec. - double _duration; // Show the time of the video play. - int64_t _predictFrameNum; // The prediction of the total number of frames. - - int64_t currentGOPTSM; // The timestamp where the GOP cursor is pointinng to. - bool EndofGOP; // A flag of reading GOP. This value need to be reset to be false by the reset methods. - int nthread; // The number of threads; - - /* Enable or disable frame reference counting. You are not supposed to support - * both paths in your application but pick the one most appropriate to your - * needs. Look for the use of refcount in this example to see what are the - * differences of API usage between them. 
*/ - int refcount; // Reference count of the video frame. - int _open_codec_context(int& stream_idx, AVCodecContext*& dec_ctx, AVFormatContext* PFormatCtx, enum AVMediaType type); - int _SaveFrame(PyObject* PyFrameList, AVFrame*& frame, AVFrame*& frameRGB, AVPacket*& pkt, bool& got_frame, int64_t minPTS, bool& processed, int cached); - int _SaveFrameForGOP(PyObject* PyFrameList, AVFrame*& frame, AVFrame*& frameRGB, AVPacket*& pkt, bool& got_frame, int& GOPstate, bool& processed, int cached); - PyObject* _SaveFrame_castToPyFrameArray(uint8_t* data[], int fWidth, int fHeight); - PyObject* _SaveFrame_castToPyFrameArrayOld(uint8_t* data[], int fWidth, int fHeight); - int __avcodec_decode_video2(AVCodecContext* avctx, AVFrame* frame, bool& got_frame, AVPacket* pkt); - int64_t __FrameToPts(int64_t seekFrame) const; - int64_t __TimeToPts(double seekTime) const; - }; - - class CMpegEncoder { - public: - CMpegEncoder(void); // Constructor. - ~CMpegEncoder(void); // 3-5 law. Destructor. - CMpegEncoder(const CMpegEncoder& ref); // Copy constructor. - CMpegEncoder& operator=(const CMpegEncoder& ref); // Copy assignment operator. - CMpegEncoder(CMpegEncoder&& ref) noexcept; // Move constructor. - CMpegEncoder& operator=(CMpegEncoder&& ref) noexcept; // Move assignment operator. - friend ostream& operator<<(ostream& out, CMpegEncoder& self_class); // Show the results. - void clear(void); // Clear all configurations and resources. - void resetPath(string inVideoPath); // Reset the path of the output video stream. - void dumpFormat(); // Show the av_format results. - bool FFmpegSetup(); // Configure the encoder, and create the file handle. This method is also equipped in the constructor. - bool FFmpegSetup(string inVideoPath); // Configure the encoder with extra arguments. - void FFmpegClose(); // Close the encoder, and finalize the written of the encoded video. - int EncodeFrame(PyArrayObject* PyFrame); // Encode one frame. 
- void setParameter(string keyword, void* ptr); // Set arguments. - PyObject* getParameter(string keyword); // Get the current arguments. - PyObject* getParameter(); // Get all key arguments. - private: - string videoPath; // The path of the output video stream. - string codecName; // The name of the codec - int64_t bitRate; // The bit rate of the output video. - int width, height; // The size of the frames in the output video. - int widthSrc, heightSrc; // The size of the input data (frames). - AVRational timeBase, frameRate; // The time base and the frame rate. - int GOPSize, MaxBFrame; // The size of GOPs, and the maximal number of B frames. - OutputStream PStreamContex; // The context of the current video parser. - AVFormatContext* PFormatCtx; // Format context of the video. - AVPacket* Ppacket; // AV Packet used for writing frames. - struct SwsContext* PswsCtx; // The context of the scale transformator. - AVFrame* __frameRGB; // A temp AV frame object. Used for converting the data format. - uint8_t* RGBbuffer; // Data buffer. 
- bool __have_video, __enable_header; - - int nthread; // The number of threads; - - AVRational _setAVRational(int num, int den); - int64_t __FrameToPts(int64_t seekFrame) const; - int64_t __TimeToPts(double seekTime) const; - bool _LoadFrame_castFromPyFrameArray(AVFrame* frame, PyArrayObject* PyFrame); - void __log_packet(); - int __write_frame(); - const AVCodec* __add_stream(); - AVFrame* __alloc_picture(enum AVPixelFormat pix_fmt, int width, int height); - bool __open_video(const AVCodec* codec, const AVDictionary* opt_arg); - AVFrame* __get_video_frame(PyArrayObject* PyFrame); - int __avcodec_encode_video2(AVCodecContext* enc_ctx, AVPacket* pkt, AVFrame* frame); - int __avcodec_encode_video2_flush(AVCodecContext* enc_ctx, AVPacket* pkt); - }; - - ostream& operator<<(ostream& out, CMpegDecoder& self_class); - ostream& operator<<(ostream& out, CMpegEncoder& self_class); -} - -#endif diff --git a/MpegCoder/MpegCoder.vcxproj b/MpegCoder/MpegCoder.vcxproj deleted file mode 100644 index 8aeef01..0000000 --- a/MpegCoder/MpegCoder.vcxproj +++ /dev/null @@ -1,196 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {57C5DB39-2AA7-40DD-B7E1-162B3E7F7044} - Win32Proj - MpegCoder - 10.0 - - - - DynamicLibrary - true - v143 - Unicode - - - DynamicLibrary - false - v143 - true - Unicode - - - DynamicLibrary - true - v143 - Unicode - - - DynamicLibrary - false - v143 - true - Unicode - - - - - - - - - - - - - - - - - - - - - true - C:\Program Files\Python37\include;../include;$(IncludePath) - C:\Program Files\Python37\libs;../lib;$(LibraryPath) - - - true - C:\Users\cainm\.conda\envs\py310\include;C:\Users\cainm\.conda\envs\py310\lib\site-packages\numpy\core\include;..\include;$(IncludePath) - C:\Users\cainm\.conda\envs\py310\libs;C:\Users\cainm\.conda\envs\py310\lib\site-packages\numpy\core\lib;..\lib;$(LibraryPath) - - - false - C:\Program Files\Python37\include;../include;$(IncludePath) - C:\Program 
Files\Python37\libs;../lib;$(LibraryPath) - - - false - C:\Users\cainm\.conda\envs\py310\include;C:\Users\cainm\.conda\envs\py310\lib\site-packages\numpy\core\include;..\include;$(IncludePath) - C:\Users\cainm\.conda\envs\py310\libs;C:\Users\cainm\.conda\envs\py310\lib\site-packages\numpy\core\lib;..\lib;$(LibraryPath) - - - - Use - Level3 - Disabled - WIN32;_DEBUG;MpegCoder_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions) - true - - - Windows - true - avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;postproc.lib;swresample.lib;swscale.lib;%(AdditionalDependencies) - - - - - Use - Level3 - Disabled - _DEBUG;MpegCoder_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions) - true - true - - - Windows - true - python310.lib;python3.lib;npymath.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;%(AdditionalDependencies) - - - echo F | xcopy /y /i "$(OutDir)$(TargetName)$(TargetExt)" "$(OutDir)mpegCoder.pyd" - - - - - Use - Level3 - MaxSpeed - true - true - WIN32;NDEBUG;MpegCoder_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions) - true - - - Windows - true - true - true - avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;postproc.lib;swresample.lib;swscale.lib;%(AdditionalDependencies) - - - - - Use - Level3 - MaxSpeed - true - true - NDEBUG;MpegCoder_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions) - true - true - - - Windows - true - true - true - python310.lib;python3.lib;npymath.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;%(AdditionalDependencies) - - - echo F | xcopy /y /i "$(OutDir)$(TargetName)$(TargetExt)" "$(OutDir)mpegCoder.pyd" - - - - - - - - - - - - - - - - - - Create - Create - Create - Create - - - - - - - - - - - - - \ No newline at end of file diff --git a/MpegCoder/MpegCoder.vcxproj.filters b/MpegCoder/MpegCoder.vcxproj.filters deleted file mode 100644 index 573b077..0000000 --- a/MpegCoder/MpegCoder.vcxproj.filters +++ /dev/null @@ -1,64 +0,0 @@ - - - - - 
{4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - - - 头文件 - - - 头文件 - - - 头文件 - - - 头文件 - - - 头文件 - - - 头文件 - - - - - 源文件 - - - 源文件 - - - 源文件 - - - 源文件 - - - 源文件 - - - 源文件 - - - - - - - - - - \ No newline at end of file diff --git a/MpegCoder/MpegCoder.vcxproj.user b/MpegCoder/MpegCoder.vcxproj.user deleted file mode 100644 index be25078..0000000 --- a/MpegCoder/MpegCoder.vcxproj.user +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/MpegCoder/MpegPyd.h b/MpegCoder/MpegPyd.h deleted file mode 100644 index 4aaa373..0000000 --- a/MpegCoder/MpegPyd.h +++ /dev/null @@ -1,1675 +0,0 @@ -#ifndef MPEGPYD_H_INCLUDED -#define MPEGPYD_H_INCLUDED - -#define PY_ARRAY_UNIQUE_SYMBOL MPEGARRAY_API - -#include -#include -#include -#include -#include -#include -#include -#include -#include "MpegCoder.h" -#include "MpegStreamer.h" -using std::string; -using std::ostringstream; - -PyObject *str2PyStr(string Str) { // Convert the output string to the widechar unicode string. - int wlen = MultiByteToWideChar(CP_ACP, NULL, Str.c_str(), int(Str.size()), NULL, 0); - wchar_t* wszString = new wchar_t[static_cast(wlen) + 1]; - MultiByteToWideChar(CP_ACP, NULL, Str.c_str(), int(Str.size()), wszString, wlen); - wszString[wlen] = 0; - PyObject* res = PyUnicode_FromWideChar(wszString, wlen); - delete[] wszString; - return res; -} - -bool PyStr2str(PyObject* py_str, string& s_str) { // Convert a python str to std::string. 
- if (!py_str) { - return false; - } - if (PyUnicode_Check(py_str)) { - auto py_bytes = PyUnicode_EncodeFSDefault(py_str); - if (!py_bytes) { - PyErr_SetString(PyExc_TypeError, "Error.PyStr2str: fail to encode the unicode str.'"); - return false; - } - auto c_str = PyBytes_AsString(py_bytes); - if (!c_str) { - PyErr_SetString(PyExc_TypeError, "Error.PyStr2str: fail to parse data from the encoded str.'"); - return false; - } - s_str.assign(c_str); - Py_DECREF(py_bytes); - } - else { - if (PyBytes_Check(py_str)) { - auto c_str = PyBytes_AsString(py_str); - if (!c_str) { - PyErr_SetString(PyExc_TypeError, "Error.PyStr2str: fail to parse data from the bytes object.'"); - return false; - } - s_str.assign(c_str); - } - else { - PyErr_SetString(PyExc_TypeError, "Error.PyStr2str: fail to convert the object to string, maybe the object is not str or bytes.'"); - return false; - } - } - return true; -} - -/***************************************************************************** -* C style definition of Python classes. -* Each class would ref the C implemented class directly. -* No extra python data member is added to these classes, -* because the data members have been already packed as private members of the -* C classes. -*****************************************************************************/ -typedef struct _C_MpegDecoder -{ - PyObject_HEAD // == PyObject ob_base; Define the PyObject header. - cmpc::CMpegDecoder* _in_Handle; // Define the implementation of the C Object. -} C_MpegDecoder; - -typedef struct _C_MpegEncoder -{ - PyObject_HEAD // == PyObject ob_base; Define the PyObject header. - cmpc::CMpegEncoder* _in_Handle; // Define the implementation of the C Object. -} C_MpegEncoder; - -typedef struct _C_MpegClient -{ - PyObject_HEAD // == PyObject ob_base; Define the PyObject header. - cmpc::CMpegClient* _in_Handle; // Define the implementation of the C Object. 
-} C_MpegClient; - -typedef struct _C_MpegServer -{ - PyObject_HEAD // == PyObject ob_base; Define the PyObject header. - cmpc::CMpegServer* _in_Handle; // Define the implementation of the C Object. -} C_MpegServer; - -static PyMemberDef C_MPDC_DataMembers[] = // Register the members of the python class. -{ // Do not register any data, because all data of this class is private. - //{"m_dEnglish", T_FLOAT, offsetof(CScore, m_dEnglish), 0, "The English score of instance."}, - { "hAddress", T_ULONGLONG, offsetof(C_MpegDecoder, _in_Handle), READONLY, "The address of the handle in memory." }, - { nullptr, 0, 0, 0, nullptr } -}; - -static PyMemberDef C_MPEC_DataMembers[] = // Register the members of the python class. -{ // Do not register any data, because all data of this class is private. - //{"m_dEnglish", T_FLOAT, offsetof(CScore, m_dEnglish), 0, "The English score of instance."}, - { "hAddress", T_ULONGLONG, offsetof(C_MpegEncoder, _in_Handle), READONLY, "The address of the handle in memory." }, - { nullptr, 0, 0, 0, nullptr } -}; - -static PyMemberDef C_MPCT_DataMembers[] = // Register the members of the python class. -{ // Do not register any data, because all data of this class is private. - //{"m_dEnglish", T_FLOAT, offsetof(CScore, m_dEnglish), 0, "The English score of instance."}, - { "hAddress", T_ULONGLONG, offsetof(C_MpegClient, _in_Handle), READONLY, "The address of the handle in memory." }, - { nullptr, 0, 0, 0, nullptr } -}; - -static PyMemberDef C_MPSV_DataMembers[] = // Register the members of the python class. -{ // Do not register any data, because all data of this class is private. - //{"m_dEnglish", T_FLOAT, offsetof(CScore, m_dEnglish), 0, "The English score of instance."}, - { "hAddress", T_ULONGLONG, offsetof(C_MpegServer, _in_Handle), READONLY, "The address of the handle in memory." 
}, - { nullptr, 0, 0, 0, nullptr } -}; - -/***************************************************************************** -* Delearaction of all methods and functions. -* Prepare the function objects for the registeration of the classes and -* functions. -*****************************************************************************/ -/*static void Example(ClassName* Self, PyObject* pArgs); -PyMODINIT_FUNC PyFunc_Example(void);*/ - -static PyObject* C_MPC_Global(PyObject* Self, PyObject* args, PyObject* kwargs) { - char dumpLevel = -1; - cmpc::CharList kwlist_str({ "dumpLevel" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|B", kwlist, &dumpLevel)) { - PyErr_SetString(PyExc_TypeError, "Error.GlobalSettings: invalid keyword'"); - return nullptr; - } - if (dumpLevel != -1) { - cmpc::__dumpControl = static_cast(dumpLevel); - switch (dumpLevel) { - case 0: - cmpc::av_log_set_level(AV_LOG_ERROR); - break; - case 1: - cmpc::av_log_set_level(AV_LOG_INFO); - break; - case 2: - default: - cmpc::av_log_set_level(AV_LOG_DEBUG); - break; - } - } - Py_RETURN_NONE; -} - -static PyObject* C_MPC_Help(PyObject* Self) { - cout << R"(================================================================================ - __ _ _ _ _ ,___ - ( / / / o ( / ) ) / / / - (__/ , , _, /_ _ _ _' ( / / / ,_ _ _, / __ __/ _ _ - _/_(_/_(__/ /_(/_/ / /_/_)_ / / (__/|_)_(/_(_)_(___/(_)(_/_(/_/ (_ - // /| /| - (/ (/ (/ -================================================================================ -Yuchen's Mpeg Coder - Readme - This is a mpegcoder adapted from FFmpeg & Python-c-api.Using it you could - get access to processing video easily. Just use it as a common module in - python like this. - >>> import mpegCoder - Noted that this API need you to install numpy. 
- An example of decoding a video in an arbitrary format: - >>> d = mpegCoder.MpegDecoder() - >>> d.FFmpegSetup(b'inputVideo.mp4') - >>> p = d.ExtractGOP(10) # Get a gop of current video by setting the - start position of 10th frame. - >>> p = d.ExtractGOP() # Get a gop of current video, using the current - position after the last ExtractGOP. - >>> d.ExtractFrame(100, 100) # Extract 100 frames from the begining of - 100th frame. - An example of transfer the coding of a video with an assigned codec: - >>> d = mpegCoder.MpegDecoder() - >>> d.FFmpegSetup(b'i.avi') - >>> e = mpegCoder.MpegEncoder() - >>> e.setParameter(decoder=d, codecName=b'libx264', videoPath=b'o.mp4') - # inherit most of parameters from the decoder. - >>> opened = e.FFmpegSetup() # Load the encoder. - >>> if opened: # If encoder is not loaded successfully, do not continue. - ... p = True - ... while p: - ... p = d.ExtractGOP() # Extract current GOP. - ... if p is not None: - ... for i in p: # Select every frame. - ... e.EncodeFrame(i) # Encode current frame. - ... e.FFmpegClose() # End encoding, and flush all frames in cache. - >>> d.clear() # Close the input video. - An example of demuxing the video streamer from a server: - >>> d = mpegCoder.MpegClient() # create the handle - >>> d.setParameter(dstFrameRate=(5,1), readSize=5, cacheSize=12) - # normalize the frame rate to 5 FPS, and use a cache which size is - # 12 frames. Read 5 frames each time. - >>> success = d.FFmpegSetup(b'rtsp://localhost:8554/video') - >>> if not success: # exit if fail to connect with the server - ... exit() - >>> d.start() # start the sub-thread for demuxing the stream. - >>> for i in range(10): # processing loop - ... time.sleep(5) - ... p = d.ExtractFrame() # every 5 seconds, read 5 frames (1 sec.) - ... # do some processing - >>> d.terminate() # shut down the current thread. You could call start() - # and let it restart. - >>> d.clear() # Disconnect with the stream. 
- For more instructions, you could tap help(mpegCoder). -================================================================================ -V3.2.0 update report: - 1. Upgrade FFMpeg to 5.0. - 2. Fix the const assignment bug caused by the codec configuration method. -V3.1.0 update report: - 1. Support str() type for all string arguments. - 2. Support http, ftp, sftp streams for MpegServer. - 3. Support "nthread" option for MpegDecoder, MpegEncoder, MpegClient and - MpegServer. - 4. Fix a bug caused by the constructor MpegServer(). - 5. Clean up all gcc warnings of the source codes. - 6. Fix typos in docstrings. -V3.0.0 update report: - 1. Fix a severe memory leaking bugs when using AVPacket. - 2. Fix a bug caused by using MpegClient.terminate() when a video is closed - by the server. - 3. Support the MpegServer. This class is used for serving the online video - streams. - 4. Refactor the implementation of the loggings. - 5. Add getParameter() and setParameter(configDict) APIs to MpegEncoder and - MpegServer. - 6. Move FFMpeg depedencies and the OutputStream class to the cmpc space. - 7. Fix dependency issues and cpp standard issues. - 8. Upgrade to `FFMpeg 4.4` Version. - 9. Add a quick script for fetching the `FFMpeg` dependencies. -V2.05 update report: - 1. Fix a severe bug that causes the memory leak when using MpegClient. - This bug also exists in MpegDecoder, but it seems that the bug would not cause - memory leak in that case. (Although we have also fixed it now.) - 2. Upgrade to FFMpeg 4.0 Version. -V2.01 update report: - Fix a bug that occurs when the first received frame may has a PTS larger than - zero. -V2.0 update report: - 1. Revise the bug of the encoder which may cause the stream duration is shorter - than the real duration of the video in some not advanced media players. - 2. Improve the structure of the code and remove some unnecessary codes. - 3. 
Provide a complete version of client, which could demux the video stream - from a server in any network protocol. -V1.8 update report: - 1. Provide options (widthDst, heightDst) to let MpegDecoder could control the - output size manually. To ensure the option is valid, we must use the method - 'setParameter' before 'FFmpegSetup'. - 2. Optimize some realization of Decoder so that its efficiency could be - improved. -V1.7 update report: - 1. Realize the encoder totally. - 2. Provide a global option 'dumpLevel' to control the log shown in the screen. - 3. Fix bugs in initalize functions. -V1.5 update report: - 1. Provide an incomplete version of encoder, which could encode frames as a - video stream that could not be played by player. -V1.4 update report: - 1. Fix a severe bug of the decoder, which causes the memory collapsed if - decoding a lot of frames. -V1.2 update report: - 1. Use numpy array to replace the native pyList, which improves the speed - significantlly. -V1.0 update report: - 1. Provide the decoder which could decode videos in arbitrary formats and - arbitrary coding. -)"; - Py_RETURN_NONE; -} - -/***************************************************************************** -* Declare the core methods of the classes. 
-*****************************************************************************/ -static int C_MPDC_init(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { // Construct - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoPath" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.Initialize: need 'videoPath(str)'"); - return -1; - } - string in_vpath; - if (!vpath) { - in_vpath.clear(); - } - else if (!PyStr2str(vpath, in_vpath)) { - return -1; - } - Self->_in_Handle = new cmpc::CMpegDecoder; - if (!in_vpath.empty()) { - Self->_in_Handle->FFmpegSetup(in_vpath); - } - - in_vpath.clear(); - //cout << sizeof(Self->_in_Handle) << " - " << sizeof(unsigned long long) << endl; - return 0; -} - -static int C_MPEC_init(C_MpegEncoder* Self) { // Construct - Self->_in_Handle = new cmpc::CMpegEncoder; - return 0; -} - -static int C_MPCT_init(C_MpegClient* Self) { // Construct - Self->_in_Handle = new cmpc::CMpegClient; - return 0; -} - -static int C_MPSV_init(C_MpegServer* Self) { // Construct - Self->_in_Handle = new cmpc::CMpegServer; - return 0; -} - -static void C_MPDC_Destruct(C_MpegDecoder* Self) { // Destructor - delete Self->_in_Handle; // Delete the allocated class implementation. - /* If there are still other members, also need to deallocate them, - * for example, Py_XDECREF(Self->Member); */ - Py_TYPE(Self)->tp_free((PyObject*)Self); // Destruct the PyObject. -} - -static void C_MPEC_Destruct(C_MpegEncoder* Self) { // Destructor - delete Self->_in_Handle; // Delete the allocated class implementation. - /* If there are still other members, also need to deallocate them, - * for example, Py_XDECREF(Self->Member); */ - Py_TYPE(Self)->tp_free((PyObject*)Self); // Destruct the PyObject. 
-} - -static void C_MPCT_Destruct(C_MpegClient* Self) { // Destructor - delete Self->_in_Handle; // Delete the allocated class implementation. - /* If there are still other members, also need to deallocate them, - * for example, Py_XDECREF(Self->Member); */ - Py_TYPE(Self)->tp_free((PyObject*)Self); // Destruct the PyObject. -} - -static void C_MPSV_Destruct(C_MpegServer* Self) { // Destructor - delete Self->_in_Handle; // Delete the allocated class implementation. - /* If there are still other members, also need to deallocate them, - * for example, Py_XDECREF(Self->Member); */ - Py_TYPE(Self)->tp_free((PyObject*)Self); // Destruct the PyObject. -} - -static PyObject* C_MPDC_Str(C_MpegDecoder* Self) { // The __str__ (print) operator. - ostringstream OStr; - OStr << *(Self->_in_Handle); - string Str = OStr.str(); - return str2PyStr(Str); // Convert the string to unicode wide char. -} - -static PyObject* C_MPEC_Str(C_MpegEncoder* Self) { // The __str__ (print) operator. - ostringstream OStr; - OStr << *(Self->_in_Handle); - string Str = OStr.str(); - return str2PyStr(Str); // Convert the string to unicode wide char. -} - -static PyObject* C_MPCT_Str(C_MpegClient* Self) { // The __str__ (print) operator. - ostringstream OStr; - OStr << *(Self->_in_Handle); - string Str = OStr.str(); - return str2PyStr(Str); // Convert the string to unicode wide char. -} - -static PyObject* C_MPSV_Str(C_MpegServer* Self) { // The __str__ (print) operator. - ostringstream OStr; - OStr << *(Self->_in_Handle); - string Str = OStr.str(); - return str2PyStr(Str); // Convert the string to unicode wide char. -} - -static PyObject* C_MPDC_Repr(C_MpegDecoder* Self) { // The __repr__ operator. - return C_MPDC_Str(Self); -} - -static PyObject* C_MPEC_Repr(C_MpegEncoder* Self) { // The __repr__ operator. - return C_MPEC_Str(Self); -} - -static PyObject* C_MPCT_Repr(C_MpegClient* Self) { // The __repr__ operator. 
- return C_MPCT_Str(Self); -} - -static PyObject* C_MPSV_Repr(C_MpegServer* Self) { // The __repr__ operator. - return C_MPSV_Str(Self); -} - -/***************************************************************************** -* Define the Python-C-APIs for . -* C_MPDC_Setup: Configure the decoder by the video. -* C_MPDC_ExtractFrame Extract serveral frames. -*****************************************************************************/ -static PyObject* C_MPDC_Setup(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPDC_Setup method, the inputs are: - * videoPath [str/bytes->str]: the video path to be decoded. - */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoPath" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoPath(str)'"); - return nullptr; - } - string in_vpath; - if (!vpath) { - in_vpath.clear(); - } - else if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - bool res; - if (!in_vpath.empty()) - res = Self->_in_Handle->FFmpegSetup(in_vpath); - else - res = Self->_in_Handle->FFmpegSetup(); - - in_vpath.clear(); - if (res) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPEC_Setup(C_MpegEncoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPEC_Setup method, the inputs are: - * videoPath [str/bytes->str]: the video path to be encoded. 
- */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoPath" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoPath(str)'"); - return nullptr; - } - string in_vpath; - if (!vpath) { - in_vpath.clear(); - } - else if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - bool res; - if (!in_vpath.empty()) - res = Self->_in_Handle->FFmpegSetup(in_vpath); - else - res = Self->_in_Handle->FFmpegSetup(); - - in_vpath.clear(); - if (res) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPCT_Setup(C_MpegClient* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPCT_Setup method, the inputs are: - * videoAddress [str/bytes->str]: the video path to be demuxed. - */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoAddress" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoAddress(str)'"); - return nullptr; - } - string in_vpath; - if (!vpath) { - in_vpath.clear(); - } - else if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - bool res; - if (!in_vpath.empty()) - res = Self->_in_Handle->FFmpegSetup(in_vpath); - else - res = Self->_in_Handle->FFmpegSetup(); - - in_vpath.clear(); - if (res) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPSV_Setup(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPSV_Setup method, the inputs are: - * videoAddress [str/bytes->str]: the video address to be served. 
- */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoAddress" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoAddress(str)'"); - return nullptr; - } - string in_vpath; - if (!vpath) { - in_vpath.clear(); - } - else if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - bool res; - if (!in_vpath.empty()) - res = Self->_in_Handle->FFmpegSetup(in_vpath); - else - res = Self->_in_Handle->FFmpegSetup(); - - in_vpath.clear(); - if (res) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPDC_resetPath(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPDC_resetPath method, the inputs are: - * videoPath [str/bytes->str]: the video path to be decoded. - */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoPath" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoPath(str)'"); - return nullptr; - } - string in_vpath; - if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - Self->_in_Handle->resetPath(in_vpath); - - in_vpath.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPEC_resetPath(C_MpegEncoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPEC_resetPath method, the inputs are: - * videoPath [str/bytes->str]: the video path to be encoded. 
- */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoPath" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoPath(str)'"); - return nullptr; - } - string in_vpath; - if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - Self->_in_Handle->resetPath(in_vpath); - - in_vpath.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPCT_resetPath(C_MpegClient* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPCT_resetPath method, the inputs are: - * videoAddress [str/bytes->str]: the video path to be demuxed. - */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoAddress" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoAddress(str)'"); - return nullptr; - } - string in_vpath; - if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - Self->_in_Handle->resetPath(in_vpath); - - in_vpath.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPSV_resetPath(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPSV_resetPath method, the inputs are: - * videoAddress [str/bytes->str]: the video address to be served. 
- */ - PyObject* vpath = nullptr; - cmpc::CharList kwlist_str({ "videoAddress" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &vpath)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'videoAddress(str)'"); - return nullptr; - } - string in_vpath; - if (!PyStr2str(vpath, in_vpath)) { - return nullptr; - } - Self->_in_Handle->resetPath(in_vpath); - - in_vpath.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPCT_Start(C_MpegClient* Self) { - /* Wrapped (void)Start method, the input is required to be empty. */ - auto success = Self->_in_Handle->start(); - if (!success) { - PyErr_SetString(PyExc_ConnectionError, "Error.Start: before call this method, need to call FFmpegSetup() successfully, and also you should not call it when the decoding thread is running.'"); - return nullptr; - } - Py_RETURN_NONE; -} - -static PyObject* C_MPCT_Terminate(C_MpegClient* Self) { - /* Wrapped (void)Terminate method, the input is required to be empty. */ - Self->_in_Handle->terminate(); - Py_RETURN_NONE; -} - -/* Pay attention to the following two methods : - * Why do we remove the Py_IN/DECREF? - * Because no temp variables are created, so we do not need to manage them, - * but just use None as the returned value. */ -static PyObject* FreePyArray(PyArrayObject* PyArray) { - uint8_t* out_dataptr = (uint8_t*)PyArray_DATA(PyArray); - delete[] out_dataptr; - return nullptr; -} -void FreePyList(PyObject* PyList) { - Py_ssize_t getlen = PyList_Size(PyList); - for (Py_ssize_t i = 0; i < getlen; i++) { - PyObject* Item = PyList_GetItem(PyList, i); - FreePyArray((PyArrayObject*)Item); - } - Py_DECREF(PyList); - PyGC_Collect(); -} - -static PyObject* C_MPDC_ExtractFrame(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (int)ExtractFrame method, the inputs are: - * framePos [int->int64_t]: the start position of the extracted frames. 
- * frameNum [int->int64_t]: the number of extracted frames. - */ - int64_t framePos = 0, frameNum = 1; - cmpc::CharList kwlist_str({ "framePos", "frameNum" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|LL", kwlist, &framePos, &frameNum)) { - PyErr_SetString(PyExc_TypeError, "Error.ExtractFrame: need 'framePos(int)/frameNum(int)'"); - return nullptr; - } - PyObject* PyFrameList = PyList_New(static_cast(0)); - //cout << framePos << " - " << frameNum << endl; - bool res = Self->_in_Handle->ExtractFrame(PyFrameList, framePos, frameNum, 0, 0); - Py_ssize_t getlen = PyList_Size(PyFrameList); - res = res && (getlen > 0); - if (res) { - PyObject* PyFrameArray = PyArray_FromObject(PyFrameList, NPY_UINT8, 4, 4); - FreePyList(PyFrameList); - return PyFrameArray; - } - else { - Py_DECREF(PyFrameList); - PyGC_Collect(); - Py_RETURN_NONE; - } -} - -static PyObject* C_MPDC_ExtractFrame_Time(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (int)ExtractFrame method, the inputs are: - * timePos [float->double]: the start position (time unit) of the extracted frames. - * frameNum [int->int64_t]: the number of extracted frames. 
- */ - double timePos = 0; - int64_t frameNum = 1; - cmpc::CharList kwlist_str({ "timePos", "frameNum" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|dL", kwlist, &timePos, &frameNum)) { - PyErr_SetString(PyExc_TypeError, "Error.ExtractFrame_Time: need 'timePos(float)/frameNum(int)'"); - return nullptr; - } - PyObject* PyFrameList = PyList_New(static_cast(0)); - //cout << framePos << " - " << frameNum << endl; - bool res = Self->_in_Handle->ExtractFrame(PyFrameList, 0, frameNum, timePos, 1); - Py_ssize_t getlen = PyList_Size(PyFrameList); - res = res && (getlen > 0); - if (res) { - PyObject* PyFrameArray = PyArray_FromObject(PyFrameList, NPY_UINT8, 4, 4); - FreePyList(PyFrameList); - return PyFrameArray; - } - else { - Py_DECREF(PyFrameList); - PyGC_Collect(); - Py_RETURN_NONE; - } -} - -static PyObject* C_MPEC_EncodeFrame(C_MpegEncoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)EncodeFrame method, the inputs are: - * PyArrayFrame [ndarray->PyArrayObject]: the frame to be encoded. - */ - PyObject* PyArrayFrame = nullptr; - cmpc::CharList kwlist_str({ "PyArrayFrame" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &PyArrayFrame)) { - PyErr_SetString(PyExc_TypeError, "Error.EncodeFrame: need 'PyArrayFrame(ndarray)'"); - return nullptr; - } - int res = Self->_in_Handle->EncodeFrame(reinterpret_cast(PyArrayFrame)); - if (res >= 0) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPSV_ServeFrame(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)ServeFrame method, the inputs are: - * PyArrayFrame [ndarray->PyArrayObject]: the frame to be encoded and served. 
- */ - PyObject* PyArrayFrame = nullptr; - cmpc::CharList kwlist_str({ "PyArrayFrame" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &PyArrayFrame)) { - PyErr_SetString(PyExc_TypeError, "Error.EncodeFrame: need 'PyArrayFrame(ndarray)'"); - return nullptr; - } - int res = Self->_in_Handle->ServeFrame(reinterpret_cast(PyArrayFrame)); - if (res >= 0) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPSV_ServeFrameBlock(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)ServeFrameBlock method, the inputs are: - * PyArrayFrame [ndarray->PyArrayObject]: the frame to be encoded and served. - */ - PyObject* PyArrayFrame = nullptr; - cmpc::CharList kwlist_str({ "PyArrayFrame" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, &PyArrayFrame)) { - PyErr_SetString(PyExc_TypeError, "Error.EncodeFrame: need 'PyArrayFrame(ndarray)'"); - return nullptr; - } - int res = Self->_in_Handle->ServeFrameBlock(reinterpret_cast(PyArrayFrame)); - if (res >= 0) - Py_RETURN_TRUE; - else - Py_RETURN_FALSE; -} - -static PyObject* C_MPCT_ExtractFrame(C_MpegClient* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (int)ExtractFrame method, the inputs are: - * readSize [int->int64_t]: the number of frames to be readed. This value could not - * exceeded the size of the frame buffer. 
- */ - int64_t readSize = 0; - cmpc::CharList kwlist_str({ "readSize" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|L", kwlist, &readSize)) { - PyErr_SetString(PyExc_TypeError, "Error.ExtractFrame: need 'readSize(int)'"); - return nullptr; - } - PyObject* res = nullptr; - if (readSize > 0) - res = Self->_in_Handle->ExtractFrame(readSize); - else - res = Self->_in_Handle->ExtractFrame(); - if (res) { - return res; - } - else { - Py_RETURN_NONE; - } -} - -static PyObject* C_MPDC_ExtractGOP(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (int)ExtractGOP method, the inputs are: - * framePos [int->int64_t]: the start position of the GOP to be extracted. - */ - int64_t framePos = -1; - cmpc::CharList kwlist_str({ "framePos" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|L", kwlist, &framePos)) { - PyErr_SetString(PyExc_TypeError, "Error.ExtractGOP: need 'framePos(int)'"); - return nullptr; - } - PyObject* PyFrameList = PyList_New(static_cast(0)); - //cout << framePos << " - " << frameNum << endl; - if (!(framePos < 0)) - Self->_in_Handle->setGOPPosition(framePos); - bool res = Self->_in_Handle->ExtractGOP(PyFrameList); - Py_ssize_t getlen = PyList_Size(PyFrameList); - res = res && (getlen > 0); - if (res) { - PyObject* PyFrameArray = PyArray_FromObject(PyFrameList, NPY_UINT8, 4, 4); - FreePyList(PyFrameList); - return PyFrameArray; - } - else { - Py_DECREF(PyFrameList); - PyGC_Collect(); - Py_RETURN_NONE; - } -} - -static PyObject* C_MPDC_ExtractGOP_Time(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (int)ExtractGOP_Time method, the inputs are: - * timePos [float->double]: the start position (time unit) of the GOP to be extracted. 
- */ - double timePos = -1; - cmpc::CharList kwlist_str({ "timePos" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|d", kwlist, &timePos)) { - PyErr_SetString(PyExc_TypeError, "Error.ExtractGOP_Time: need 'timePos(float)'"); - return nullptr; - } - PyObject* PyFrameList = PyList_New(static_cast(0)); - //cout << framePos << " - " << frameNum << endl; - if (!(timePos < 0)) - Self->_in_Handle->setGOPPosition(timePos); - bool res = Self->_in_Handle->ExtractGOP(PyFrameList); - Py_ssize_t getlen = PyList_Size(PyFrameList); - res = res && (getlen > 0); - if (res) { - PyObject* PyFrameArray = PyArray_FromObject(PyFrameList, NPY_UINT8, 4, 4); - FreePyList(PyFrameList); - return PyFrameArray; - } - else { - Py_DECREF(PyFrameList); - PyGC_Collect(); - Py_RETURN_NONE; - } -} - -static PyObject* C_MPDC_setGOPPosition(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (void)setGOPPosition method, the inputs are: - * framePos [int->int64_t]: the start position of the GOP to be extracted. - * timePos [float->double]: the start position (time unit) of the GOP to be extracted. - */ - int64_t framePos = -1; - double timePos = -1; - cmpc::CharList kwlist_str({ "framePos", "timePos" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Ld", kwlist, &framePos, &timePos)) { - PyErr_SetString(PyExc_TypeError, "Error.setGOPPosition: need 'framePos(int)'/'timePos(float)'"); - return nullptr; - } - if (!(framePos < 0)) - Self->_in_Handle->setGOPPosition(framePos); - else if (!(timePos < 0)) - Self->_in_Handle->setGOPPosition(timePos); - Py_RETURN_NONE; -} - -static PyObject* C_MPDC_getParam(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPDC_getParam function, the inputs are: - * paramName [str/bytes->str]: The name of the parameter to be gotten, could be. 
- * videoPath: [str] Path of the current video. - * width/height: [int] The width / height of the frame. - * frameCount: [int] The count of frames of the current decoding work. - * coderName: [str] The name of the decoder. - * nthread: [int] The number of decoder threads. - * duration: [float] The duration of the video. - * estFrameNum: [int] The estimated total frame number. - * avgFrameRate [float] The average frame rate. - */ - PyObject* param = nullptr; - cmpc::CharList kwlist_str({ "paramName" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, ¶m)) { - PyErr_SetString(PyExc_TypeError, "Error.getParameter: need 'paramName(str)'"); - return nullptr; - } - string in_param; - if (!param) { - in_param.clear(); - } - else if (!PyStr2str(param, in_param)) { - return nullptr; - } - PyObject* res = nullptr; - if (in_param.empty()) { - res = Self->_in_Handle->getParameter(); - } - else { - res = Self->_in_Handle->getParameter(in_param); - } - in_param.clear(); - return res; -} - -static PyObject* C_MPEC_getParam(C_MpegEncoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPEC_getParam function, the inputs are: - * paramName [str/bytes->str]: The name of the parameter to be gotten, could be. - * videoPath: [str] Path of the current video. - * codecName: [str] The name of the codec. - * nthread: [int] The number of encoder threads. - * bitRate: [int] The target bit rate. - * width/height: [int] The width / height of the encoded frame. - * widthSrc/heightSrc: [int] The width / height of the input frame. - * GOPSize: [int] The size of one GOP. - * maxBframe: [int] The maximal number of continuous B frames. - * frameRate: [float] The target frame rate. 
- */ - PyObject* param = nullptr; - cmpc::CharList kwlist_str({ "paramName" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, ¶m)) { - PyErr_SetString(PyExc_TypeError, "Error.getParameter: need 'paramName(str)'"); - return nullptr; - } - string in_param; - if (!param) { - in_param.clear(); - } - else if (!PyStr2str(param, in_param)) { - return nullptr; - } - PyObject* res = nullptr; - if (in_param.empty()) { - res = Self->_in_Handle->getParameter(); - } - else { - res = Self->_in_Handle->getParameter(in_param); - } - in_param.clear(); - return res; -} - -static PyObject* C_MPCT_getParam(C_MpegClient* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPCT_getParam method, the inputs are: - * parameter [str/bytes->str]: The name of the parameter to be gotten, could be. - * videoAddress: [str] The address of the current video. - * width/height: [int] The width / height of the received frame. - * frameCount: [int] The count of frames of the current decoding work. - * coderName: [str] The name of the decoder. - * nthread: [int] The number of decoder threads. - * duration: [float] The duration of the video. - * estFrameNum: [int] The estimated total frame number. - * avgFrameRate [float] The average frame rate. 
- */ - PyObject* param = nullptr; - cmpc::CharList kwlist_str({ "paramName" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, ¶m)) { - PyErr_SetString(PyExc_TypeError, "Error.getParameter: need 'paramName(str)'"); - return nullptr; - } - string in_param; - if (!param) { - in_param.clear(); - } - else if (!PyStr2str(param, in_param)) { - return nullptr; - } - PyObject* res = nullptr; - if (in_param.empty()) { - res = Self->_in_Handle->getParameter(); - } - else { - res = Self->_in_Handle->getParameter(in_param); - } - in_param.clear(); - return res; -} - -static PyObject* C_MPSV_getParam(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPSV_getParam function, the inputs are: - * paramName [str/bytes->str]: The name of the parameter to be gotten, could be. - * videoAddress: [str] The address of the current video. - * codecName: [str] The name of the codec. - * formatName: [str] The name of the stream format. - * nthread: [int] The number of encoder threads. - * bitRate: [int] The target bit rate. - * width/height: [int] The width / height of the encoded frame. - * widthSrc/heightSrc: [int] The width / height of the input frame. - * GOPSize: [int] The size of one GOP. - * maxBframe: [int] The maximal number of continuous B frames. - * frameRate: [float] The target frame rate. - * waitRef [float] The reference used for sync. waiting. - * ptsAhead [int] The ahead time duration in the uit of time stamp. 
- */ - PyObject* param = nullptr; - cmpc::CharList kwlist_str({ "paramName" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist, ¶m)) { - PyErr_SetString(PyExc_TypeError, "Error.getParameter: need 'paramName(str)'"); - return nullptr; - } - string in_param; - if (!param) { - in_param.clear(); - } - else if (!PyStr2str(param, in_param)) { - return nullptr; - } - PyObject* res = nullptr; - if (in_param.empty()) { - res = Self->_in_Handle->getParameter(); - } - else { - res = Self->_in_Handle->getParameter(in_param); - } - in_param.clear(); - return res; -} - -static PyObject* C_MPDC_setParam(C_MpegDecoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (void)C_MPDC_setParam method, the inputs are: - * widthDst/heightDst: [int] The width / height of the decoded frames. - * nthread: [int] The number of decoder threads. - */ - int widthDst = 0; - int heightDst = 0; - int nthread = 0; - cmpc::CharList kwlist_str({ "widthDst", "heightDst", "nthread" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii", kwlist, &widthDst, &heightDst, &nthread)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'params'"); - return nullptr; - } - if (widthDst > 0) { - Self->_in_Handle->setParameter("widthDst", &widthDst); - } - if (heightDst > 0) { - Self->_in_Handle->setParameter("heightDst", &heightDst); - } - if (nthread > 0) { - Self->_in_Handle->setParameter("nthread", &nthread); - } - Py_RETURN_NONE; -} - -static PyObject* C_MPEC_setParam(C_MpegEncoder* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPEC_setParam method, the inputs are: - * decoder: [MpegDecoder / MpegClient]: The parameters to be configured. - * configDict: [dict] A collection of key params. - * videoPath: [str/bytes] Path of the current video. - * codecName: [str/bytes] The name of the codec. 
- * nthread: [int] The number of encoder threads. - * bitRate: [double] The target bit rate. - * width/height: [int] The width / height of the encoded frame. - * widthSrc/heightSrc: [int] The width / height of the input frame. - * GOPSize: [int] The size of one GOP. - * maxBframe: [int] The maximal number of continuous B frames. - * frameRate: [tuple] The target frame rate. - */ - PyObject* decoder = nullptr; - PyObject* configDict = nullptr; - PyObject* videoPath = nullptr; - PyObject* codecName = nullptr; - double bitRate = -1; - int nthread = 0; - int width = 0; - int height = 0; - int widthSrc = 0; - int heightSrc = 0; - int GOPSize = 0; - int MaxBframe = -1; - PyObject* frameRate = nullptr; - cmpc::CharList kwlist_str({ "decoder", "configDict", "videoPath", "codecName", "nthread", "bitRate", "width", "height", "widthSrc", "heightSrc", "GOPSize", "maxBframe", "frameRate" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOOidiiiiiiO", kwlist, &decoder, &configDict, &videoPath, &codecName, &nthread, &bitRate, &width, &height, &widthSrc, &heightSrc, &GOPSize, &MaxBframe, &frameRate)) { - PyErr_SetString(PyExc_TypeError, "Error.setParameter: need 'params'"); - return nullptr; - } - string temp_str; - if (decoder) { - temp_str.assign(decoder->ob_type->tp_name); - if (temp_str.compare("mpegCoder.MpegDecoder") == 0) { - auto decoderPtr = reinterpret_cast(decoder); - Self->_in_Handle->setParameter("decoder", decoderPtr->_in_Handle); - } - else if (temp_str.compare("mpegCoder.MpegClient") == 0) { - auto decoderPtr = reinterpret_cast(decoder); - Self->_in_Handle->setParameter("client", decoderPtr->_in_Handle); - } - else { - cerr << "Warning.setParameter: Not intended decoder type, no valid update in this step." 
<< endl; - } - } - else if (configDict) { - if (PyDict_Check(configDict)) { - Self->_in_Handle->setParameter("configDict", configDict); - } - else { - cerr << "Warning.setParameter: Not intended configDict type (require to be a dict), no valid update in this step." << endl; - } - } - if (videoPath) { - if (PyStr2str(videoPath, temp_str)) { - Self->_in_Handle->setParameter("videoPath", &temp_str); - } - else { - return nullptr; - } - } - if (codecName) { - if (PyStr2str(codecName, temp_str)) { - Self->_in_Handle->setParameter("codecName", &temp_str); - } - else { - return nullptr; - } - } - if (nthread > 0) { - Self->_in_Handle->setParameter("nthread", &nthread); - } - if (bitRate > 0) { - Self->_in_Handle->setParameter("bitRate", &bitRate); - } - if (width > 0) { - Self->_in_Handle->setParameter("width", &width); - } - if (height > 0) { - Self->_in_Handle->setParameter("height", &height); - } - if (widthSrc > 0) { - Self->_in_Handle->setParameter("widthSrc", &widthSrc); - } - if (heightSrc > 0) { - Self->_in_Handle->setParameter("heightSrc", &heightSrc); - } - if (GOPSize > 0) { - Self->_in_Handle->setParameter("GOPSize", &GOPSize); - } - if (MaxBframe >= 0) { - Self->_in_Handle->setParameter("maxBframe", &MaxBframe); - } - if (frameRate) { - if (PyTuple_Check(frameRate) && PyTuple_Size(frameRate) == 2) { - Self->_in_Handle->setParameter("frameRate", frameRate); - } - else { - cerr << "Warning.setParameter: {frameRate} must be a 2-dim tuple, so there is no valid update in this step." << endl; - } - } - temp_str.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPCT_setParam(C_MpegClient* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (void)C_MPCT_setParam method, the inputs are: - * widthDst/heightDst: [int] The width / height of the decoded frames. - * cacheSize/readSize: [int] The size of the cache, and the reading size. - * dstFrameRate: [tuple] The target frame rate of the client. - * nthread: [int] The number of decoder threads. 
- */ - int widthDst = 0; - int heightDst = 0; - int nthread = 0; - int64_t cacheSize = 0; - int64_t readSize = 0; - PyObject* frameRate = nullptr; - cmpc::CharList kwlist_str({ "widthDst", "heightDst", "cacheSize", "readSize", "dstFrameRate", "nthread" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iiLLOi", kwlist, &widthDst, &heightDst, &cacheSize, &readSize, &frameRate, &nthread)) { - PyErr_SetString(PyExc_TypeError, "Error.FFmpegSetup: need 'params'"); - return nullptr; - } - if (widthDst > 0) { - Self->_in_Handle->setParameter("widthDst", &widthDst); - } - if (heightDst > 0) { - Self->_in_Handle->setParameter("heightDst", &heightDst); - } - if (cacheSize > 0) { - Self->_in_Handle->setParameter("cacheSize", &cacheSize); - } - if (readSize > 0) { - Self->_in_Handle->setParameter("readSize", &readSize); - } - if (frameRate) { - if (PyTuple_Check(frameRate) && PyTuple_Size(frameRate) == 2) { - Self->_in_Handle->setParameter("dstFrameRate", frameRate); - } - else { - cerr << "Warning.setParameter: {dstFrameRate} must be a 2-dim tuple, so there is no valid update in this step." << endl; - } - } - if (nthread > 0) { - Self->_in_Handle->setParameter("nthread", &nthread); - } - Py_RETURN_NONE; -} - -static PyObject* C_MPSV_setParam(C_MpegServer* Self, PyObject* args, PyObject* kwargs) { - /* Wrapped (bool)C_MPSV_setParam method, the inputs are: - * decoder [MpegDecoder / MpegClient]: The parameters to be configured. - * videoAddress: [str/bytes] The address of the current video. - * codecName: [str/bytes] The name of the codec. - * nthread: [int] The number of encoder threads. - * bitRate: [double] The target bit rate. - * width/height: [int] The width / height of the encoded frame. - * widthSrc/heightSrc: [int] The width / height of the input frame. - * GOPSize: [int] The size of one GOP. - * maxBframe: [int] The maximal number of continuous B frames. 
- * frameRate: [tuple] The target frame rate. - * frameAhead [int] The number of ahead frames. This value is suggested - * to be larger than the GOPSize. - */ - PyObject* decoder = nullptr; - PyObject* configDict = nullptr; - PyObject* videoAddress = nullptr; - PyObject* codecName = nullptr; - double bitRate = -1; - int nthread = 0; - int width = 0; - int height = 0; - int widthSrc = 0; - int heightSrc = 0; - int GOPSize = 0; - int MaxBframe = -1; - int frameAhead = 0; - PyObject* frameRate = nullptr; - cmpc::CharList kwlist_str({ "decoder", "configDict", "videoAddress", "codecName", "nthread", "bitRate", "width", "height", "widthSrc", "heightSrc", "GOPSize", "maxBframe", "frameRate", "frameAhead" }); - auto kwlist_ptr = kwlist_str.c_str(); - auto kwlist = (char**)(kwlist_ptr.get()); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOOidiiiiiiOi", kwlist, &decoder, &configDict, &videoAddress, &codecName, &nthread, &bitRate, &width, &height, &widthSrc, &heightSrc, &GOPSize, &MaxBframe, &frameRate, &frameAhead)) { - PyErr_SetString(PyExc_TypeError, "Error.setParameter: need 'params'"); - return nullptr; - } - string temp_str; - if (decoder) { - temp_str.assign(decoder->ob_type->tp_name); - if (temp_str.compare("mpegCoder.MpegDecoder") == 0) { - auto decoderPtr = reinterpret_cast(decoder); - Self->_in_Handle->setParameter("decoder", decoderPtr->_in_Handle); - } - else if (temp_str.compare("mpegCoder.MpegClient") == 0) { - auto decoderPtr = reinterpret_cast(decoder); - Self->_in_Handle->setParameter("client", decoderPtr->_in_Handle); - } - else { - cerr << "Warning.setParameter: Not intended decoder type, no valid update in this step." << endl; - } - } - else if (configDict) { - if (PyDict_Check(configDict)) { - Self->_in_Handle->setParameter("configDict", configDict); - } - else { - cerr << "Warning.setParameter: Not intended configDict type (require to be a dict), no valid update in this step." 
<< endl; - } - } - if (videoAddress) { - if (PyStr2str(videoAddress, temp_str)) { - Self->_in_Handle->setParameter("videoAddress", &temp_str); - } - else { - return nullptr; - } - } - if (codecName) { - if (PyStr2str(codecName, temp_str)) { - Self->_in_Handle->setParameter("codecName", &temp_str); - } - else { - return nullptr; - } - } - if (nthread > 0) { - Self->_in_Handle->setParameter("nthread", &nthread); - } - if (bitRate > 0) { - Self->_in_Handle->setParameter("bitRate", &bitRate); - } - if (width > 0) { - Self->_in_Handle->setParameter("width", &width); - } - if (height > 0) { - Self->_in_Handle->setParameter("height", &height); - } - if (widthSrc > 0) { - Self->_in_Handle->setParameter("widthSrc", &widthSrc); - } - if (heightSrc > 0) { - Self->_in_Handle->setParameter("heightSrc", &heightSrc); - } - if (GOPSize > 0) { - Self->_in_Handle->setParameter("GOPSize", &GOPSize); - } - if (MaxBframe >= 0) { - Self->_in_Handle->setParameter("maxBframe", &MaxBframe); - } - if (frameRate) { - if (PyTuple_Check(frameRate) && PyTuple_Size(frameRate) == 2) { - Self->_in_Handle->setParameter("frameRate", frameRate); - } - else { - cerr << "Warning.setParameter: {frameRate} must be a 2-dim tuple, so there is no valid update in this step." << endl; - } - } - if (frameAhead > 0) { - Self->_in_Handle->setParameter("frameAhead", &frameAhead); - } - temp_str.clear(); - Py_RETURN_NONE; -} - -static PyObject* C_MPDC_DumpFile(C_MpegDecoder* Self) { - /* Wrapped (void)dumpFormat method, the input is required to be empty. */ - Self->_in_Handle->dumpFormat(); - Py_RETURN_NONE; -} - -static PyObject* C_MPEC_DumpFile(C_MpegEncoder* Self) { - /* Wrapped (void)dumpFormat method, the input is required to be empty. */ - Self->_in_Handle->dumpFormat(); - Py_RETURN_NONE; -} - -static PyObject* C_MPCT_DumpFile(C_MpegClient* Self) { - /* Wrapped (void)dumpFormat method, the input is required to be empty. 
 */
    Self->_in_Handle->dumpFormat();
    Py_RETURN_NONE;
}

static PyObject* C_MPSV_DumpFile(C_MpegServer* Self) {
    /* Wrapped (void)dumpFormat method, the input is required to be empty. */
    Self->_in_Handle->dumpFormat();
    Py_RETURN_NONE;
}

static PyObject* C_MPDC_Clear(C_MpegDecoder* Self) {
    /* Wrapped (void)clear method, the input is required to be empty. */
    Self->_in_Handle->clear();
    Py_RETURN_NONE;
}

static PyObject* C_MPEC_Clear(C_MpegEncoder* Self) {
    /* Wrapped (void)clear method, the input is required to be empty. */
    Self->_in_Handle->clear();
    Py_RETURN_NONE;
}

static PyObject* C_MPCT_Clear(C_MpegClient* Self) {
    /* Wrapped (void)clear method, the input is required to be empty. */
    Self->_in_Handle->clear();
    Py_RETURN_NONE;
}

static PyObject* C_MPSV_Clear(C_MpegServer* Self) {
    /* Wrapped (void)clear method, the input is required to be empty. */
    Self->_in_Handle->clear();
    Py_RETURN_NONE;
}

static PyObject* C_MPEC_Close(C_MpegEncoder* Self) {
    /* Wrapped (void)close method, the input is required to be empty. */
    Self->_in_Handle->FFmpegClose();
    Py_RETURN_NONE;
}

static PyObject* C_MPSV_Close(C_MpegServer* Self) {
    /* Wrapped (void)close method, the input is required to be empty. */
    Self->_in_Handle->FFmpegClose();
    Py_RETURN_NONE;
}

/*****************************************************************************
* Register the methods of each class.
*****************************************************************************/
static PyMethodDef C_MPC_MethodMembers[] = // Register the global method list.
{
    { "setGlobal", (PyCFunction)C_MPC_Global, METH_VARARGS | METH_KEYWORDS, \
"Set global setting parameters.\n - dumpLevel: [int] the level of dumped log.\n -|- 0: silent executing.\n -|- 1: [default] dump basic informations.\n -|- 2: dump all informations." },
    { "readme", (PyCFunction)C_MPC_Help, METH_NOARGS, \
"Use it to see readme and some useful instructions."
    },
    { nullptr, nullptr, 0, nullptr }
};

static PyMethodDef C_MPDC_MethodMembers[] = // Register the member methods of Decoder.
{ // This step add the methods to the C-API of the class.
    { "FFmpegSetup", (PyCFunction)C_MPDC_Setup, METH_VARARGS | METH_KEYWORDS, \
"Reset the decoder and the video format.\n - videoPath: [str/bytes] the path of decoded video file." },
    { "resetPath", (PyCFunction)C_MPDC_resetPath, METH_VARARGS | METH_KEYWORDS, \
"Reset the path of decoded video.\n - videoPath: [str/bytes] the path of decoded video file." },
    { "ExtractFrame", (PyCFunction)C_MPDC_ExtractFrame, METH_VARARGS | METH_KEYWORDS, \
"Extract a series of continius frames at the specific position.\n - framePos: [int] the start position of the decoder.\n - frameNum: [int] the expected number of extracted frames." },
    { "ExtractFrameByTime", (PyCFunction)C_MPDC_ExtractFrame_Time, METH_VARARGS | METH_KEYWORDS, \
"Extract a series of continius frames at the specific position (time based).\n - timePos: [double] the start position (second) of the decoder.\n - frameNum: [int] the expected number of extracted frames." },
    { "ExtractGOP", (PyCFunction)C_MPDC_ExtractGOP, METH_VARARGS | METH_KEYWORDS, \
"Extract a series of continius frames as a GOP at the specific position.\n - framePos: [int] the start position of the decoder." },
    { "ExtractGOPByTime", (PyCFunction)C_MPDC_ExtractGOP_Time, METH_VARARGS | METH_KEYWORDS, \
"Extract a series of continius frames as a GOP at the specific position (time based).\n - timePos: [double] the start position (second) of the decoder." },
    { "ResetGOPPosition", (PyCFunction)C_MPDC_setGOPPosition, METH_VARARGS | METH_KEYWORDS, \
"Reset the start position of GOP flow.\n - framePos: [int] the start position of the decoder.\n - timePos: [double] the start position (second) of the decoder." },
    { "clear", (PyCFunction)C_MPDC_Clear, METH_NOARGS, \
"Clear all states (except the videoPath)."
    },
    { "dumpFile", (PyCFunction)C_MPDC_DumpFile, METH_NOARGS, \
"Show current state of formatContex." },
    { "setParameter", (PyCFunction)C_MPDC_setParam, METH_VARARGS | METH_KEYWORDS, \
"Set the optional parameters of 'Setup' & 'Extract' functions via different methods.\n - widthDst: [int] the width of destination (frame), if <=0 (default), it would take no effect.\n - heightDst: [int] the height of destination (frame), if <=0 (default), it would take no effect.\n - nthread: [int] number of decoder threads." },
    { "getParameter", (PyCFunction)C_MPDC_getParam, METH_VARARGS | METH_KEYWORDS, \
"Input a parameter's name to get it.\n - paramName: [str/bytes] the name of needed parameter. If set empty, would return all key params.\n -|- videoPath: [str] the current path of the read video.\n -|- width/height: [int] the size of one frame.\n -|- frameCount: [int] the number of returned frames in the last ExtractFrame().\n -|- coderName: [str] the name of the decoder.\n -|- nthread: [int] number of decoder threads.\n -|- duration: [double] the total seconds of this video.\n -|- estFrameNum: [int] the estimated total frame number(may be not accurate).\n -|- avgFrameRate: [double] the average of FPS." },
    { nullptr, nullptr, 0, nullptr }
};

static PyMethodDef C_MPEC_MethodMembers[] = // Register the member methods of Encoder.
{ // This step add the methods to the C-API of the class.
    { "FFmpegSetup", (PyCFunction)C_MPEC_Setup, METH_VARARGS | METH_KEYWORDS, \
"Open the encoded video and reset the encoder.\n - videoPath: [str/bytes] the path of encoded(written) video file." },
    { "resetPath", (PyCFunction)C_MPEC_resetPath, METH_VARARGS | METH_KEYWORDS, \
"Reset the output path of encoded video.\n - videoPath: [str/bytes] the path of encoded video file." },
    { "EncodeFrame", (PyCFunction)C_MPEC_EncodeFrame, METH_VARARGS | METH_KEYWORDS, \
"Encode one frame.\n - PyArrayFrame: [ndarray] the frame that needs to be encoded."
    },
    { "setParameter", (PyCFunction)C_MPEC_setParam, METH_VARARGS | METH_KEYWORDS, \
"Set the necessary parameters of 'Setup' & 'Encode' functions via different methods.\n - decoder: [MpegDecoder / MpegClient] copy metadata from a known decoder.\n - configDict: [dict] a config dict returned by getParameter().\n - videoPath: [str/bytes] the current path of the encoded video.\n - codecName: [str/bytes] the name of the encoder.\n - nthread: [int] number of encoder threads.\n - bitRate: [float] the indended bit rate (Kb/s).\n - width/height: [int] the size of one encoded (scaled) frame.\n - widthSrc/heightSrc: [int] the size of one input frame, if set <=0, these parameters would not be enabled.\n - GOPSize: [int] the number of frames in a GOP.\n - maxBframe: [int] the maximal number of B frames in a GOP.\n - frameRate: [tuple] a 2-dim tuple indicating the FPS(num, den) of the stream." },
    { "getParameter", (PyCFunction)C_MPEC_getParam, METH_VARARGS | METH_KEYWORDS, \
"Input a parameter's name to get it.\n - paramName: [str/bytes] the name of needed parameter. If set empty, would return all key params.\n -|- videoPath: [str] the current path of the encoded video.\n -|- codecName: [str] the name of the encoder.\n -|- nthread: [int] number of encoder threads.\n -|- bitRate: [float] the indended bit rate (Kb/s).\n -|- width/height: [int] the size of one encoded (scaled) frame.\n -|- widthSrc/heightSrc: [int] the size of one input frame, if set <=0, these parameters would not be enabled.\n -|- GOPSize: [int] the number of frames in a GOP.\n -|- maxBframe: [int] the maximal number of B frames in a GOP.\n -|- frameRate: [tuple] a 2-dim tuple indicating the FPS(num, den) of the stream." },
    { "clear", (PyCFunction)C_MPEC_Clear, METH_NOARGS, \
"Clear all states." },
    { "dumpFile", (PyCFunction)C_MPEC_DumpFile, METH_NOARGS, \
"Show current state of formatContex."
    },
    { "FFmpegClose", (PyCFunction)C_MPEC_Close, METH_NOARGS, \
"Close currently encoded video and write the end code of a MPEG file." },
    { nullptr, nullptr, 0, nullptr }
};

static PyMethodDef C_MPCT_MethodMembers[] = // Register the member methods of Client.
{ // This step add the methods to the C-API of the class.
    { "FFmpegSetup", (PyCFunction)C_MPCT_Setup, METH_VARARGS | METH_KEYWORDS, \
"Reset the decoder and the video format.\n - videoAddress: [str/bytes] the path of decoded video file." },
    { "resetPath", (PyCFunction)C_MPCT_resetPath, METH_VARARGS | METH_KEYWORDS, \
"Reset the address of decoded video.\n - videoAddress: [str/bytes] the path of decoded video file." },
    { "start", (PyCFunction)C_MPCT_Start, METH_NOARGS, \
"Start the demuxing thread, must be called after FFmpegSetup()." },
    { "terminate", (PyCFunction)C_MPCT_Terminate, METH_NOARGS, \
"Terminate all current demuxing threads, usually used when there is only one thread." },
    { "ExtractFrame", (PyCFunction)C_MPCT_ExtractFrame, METH_VARARGS | METH_KEYWORDS, \
"Extract frames from the current buffer.\n - readSize: [int] the number of extracted frames, should not be larger than cache number. \nIf not set, will be used as the default value." },
    { "clear", (PyCFunction)C_MPCT_Clear, METH_NOARGS, \
"Clear all states (except the videoAddress)." },
    { "dumpFile", (PyCFunction)C_MPCT_DumpFile, METH_NOARGS, \
"Show current state of formatContex."
    },
    { "setParameter", (PyCFunction)C_MPCT_setParam, METH_VARARGS | METH_KEYWORDS, \
"Set the optional parameters of 'Setup' & 'Extract' functions and the demuxing thread via different methods.\n - widthDst: [int] the width of destination (frame), if <=0 (default), it would take no effect.\n - heightDst: [int] the height of destination (frame), if <=0 (default), it would take no effect.\n - cacheSize: [int] the number of allocated avaliable frames in the cache.\n - readSize: [int] the default value of ExtractFrame().\n - dstFrameRate: [tuple] a 2-dim tuple indicating the destination FPS(num, den) of the stream.\n - nthread: [int] number of decoder threads." },
    { "getParameter", (PyCFunction)C_MPCT_getParam, METH_VARARGS | METH_KEYWORDS, \
"Input a parameter's name to get it.\n - paramName: [str/bytes] the name of needed parameter. If set empty, would return all key params.\n -|- videoAddress: [str] the current path of the read video.\n -|- width/height: [int] the size of one frame.\n -|- frameCount: [int] the number of returned frames in the last ExtractFrame().\n -|- coderName: [str] the name of the decoder.\n -|- nthread: [int] number of decoder threads.\n -|- duration: [double] the total seconds of this video.\n -|- estFrameNum: [int] the estimated total frame number(may be not accurate).\n -|- srcFrameRate: [double] the average of FPS of the source video." },
    { nullptr, nullptr, 0, nullptr }
};

static PyMethodDef C_MPSV_MethodMembers[] = // Register the member methods of Server.
{ // This step add the methods to the C-API of the class.
    { "FFmpegSetup", (PyCFunction)C_MPSV_Setup, METH_VARARGS | METH_KEYWORDS, \
"Open the encoded video and reset the encoder.\n - videoAddress: [str/bytes] the path of encoded(written) video file." },
    { "resetPath", (PyCFunction)C_MPSV_resetPath, METH_VARARGS | METH_KEYWORDS, \
"Reset the output path of encoded video.\n - videoAddress: [str/bytes] the path of encoded video file."
}, - { "ServeFrame", (PyCFunction)C_MPSV_ServeFrame, METH_VARARGS | METH_KEYWORDS, \ - "Encode one frame and send the frame non-blockly.\n - PyArrayFrame: [ndarray] the frame that needs to be encoded." }, - { "ServeFrameBlock", (PyCFunction)C_MPSV_ServeFrameBlock, METH_VARARGS | METH_KEYWORDS, \ - "Encode one frame and send the frame blockly. This method is suggested to be used in sub-processes.\n - PyArrayFrame: [ndarray] the frame that needs to be encoded." }, - { "setParameter", (PyCFunction)C_MPSV_setParam, METH_VARARGS | METH_KEYWORDS, \ - "Set the necessary parameters of 'Setup' & 'Serve' functions via different methods.\n - decoder: [MpegDecoder / MpegClient] copy metadata from a known decoder.\n - configDict: [dict] a config dict returned by getParameter().\n - videoAddress: [str/bytes] the current path of the encoded video.\n - codecName: [str/bytes] the name of the encoder.\n - nthread: [int] number of encoder threads.\n - bitRate: [float] the indended bit rate (Kb/s).\n - width/height: [int] the size of one encoded (scaled) frame.\n - widthSrc/heightSrc: [int] the size of one input frame, if set <=0, these parameters would not be enabled.\n - GOPSize: [int] the number of frames in a GOP.\n - maxBframe: [int] the maximal number of B frames in a GOP.\n - frameRate: [tuple] a 2-dim tuple indicating the FPS(num, den) of the stream.\n - frameAhead: [int] The number of ahead frames. This value is suggested to be larger than the GOPSize.." }, - { "getParameter", (PyCFunction)C_MPSV_getParam, METH_VARARGS | METH_KEYWORDS, \ - "Input a parameter's name to get it.\n - paramName: [str/bytes] the name of needed parameter. 
If set empty, would return all key params.\n -|- videoAddress: [str] the current path of the encoded video.\n -|- codecName: [str] the name of the encoder.\n -|- formatName: [str] the format name of the stream.\n -|- nthread: [int] number of encoder threads.\n -|- bitRate: [float] the indended bit rate (Kb/s).\n -|- width/height: [int] the size of one encoded (scaled) frame.\n -|- widthSrc/heightSrc: [int] the size of one input frame, if set <=0, these parameters would not be enabled.\n -|- GOPSize: [int] the number of frames in a GOP.\n -|- maxBframe: [int] the maximal number of B frames in a GOP.\n -|- frameRate: [tuple] a 2-dim tuple indicating the FPS(num, den) of the stream.\n -|- waitRef: [float] The reference used for sync. waiting.\n -|- ptsAhead: [int] The ahead time duration in the uit of time stamp." }, - { "clear", (PyCFunction)C_MPSV_Clear, METH_NOARGS, \ - "Clear all states." }, - { "dumpFile", (PyCFunction)C_MPSV_DumpFile, METH_NOARGS, \ - "Show current state of formatContex." }, - { "FFmpegClose", (PyCFunction)C_MPSV_Close, METH_NOARGS, \ - "Close currently encoded video and write the end code of a MPEG file." }, - { nullptr, nullptr, 0, nullptr } -}; - -/***************************************************************************** -* Declaration of the class, including the name, information and the members. -* This is the top-level packing of the class APIs. -*****************************************************************************/ -static PyTypeObject C_MPDC_ClassInfo = -{ - PyVarObject_HEAD_INIT(nullptr, 0)"mpegCoder.MpegDecoder", // The implementation of the __class__.__name__. - sizeof(C_MpegDecoder), // The memory length of the class. This value is required for PyObject_New. - 0, - (destructor)C_MPDC_Destruct, // Destructor. - 0, - 0, - 0, - 0, - (reprfunc)C_MPDC_Repr, // __repr__ method. - 0, - 0, - 0, - 0, - 0, - (reprfunc)C_MPDC_Str, // __str__ method. 
- 0, - 0, - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // If no methods are provided, this value is Py_TPFLAGS_DEFAULE. - "This class has wrapped the C-API of FFmpeg decoder so that users could call its methods\n to decode the frame data in python quickly.", // __doc__, the docstring of the class. - 0, - 0, - 0, - 0, - 0, - 0, - C_MPDC_MethodMembers, // The collection of all method members. - C_MPDC_DataMembers, // THe collection of all data members. - 0, - 0, - 0, - 0, - 0, - 0, - (initproc)C_MPDC_init, // Constructor. - 0, -}; - -static PyTypeObject C_MPEC_ClassInfo = -{ - PyVarObject_HEAD_INIT(nullptr, 0)"mpegCoder.MpegEncoder", // The implementation of the __class__.__name__. - sizeof(C_MpegEncoder), // The memory length of the class. This value is required for PyObject_New. - 0, - (destructor)C_MPEC_Destruct, // Destructor. - 0, - 0, - 0, - 0, - (reprfunc)C_MPEC_Repr, // __repr__ method. - 0, - 0, - 0, - 0, - 0, - (reprfunc)C_MPEC_Str, // __str__ method. - 0, - 0, - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // If no methods are provided, this value is Py_TPFLAGS_DEFAULE. - "This class has wrapped the C-API of FFmpeg encoder so that users could call its methods\n to encode frames by using numpy-data quickly.", // __doc__, the docstring of the class. - 0, - 0, - 0, - 0, - 0, - 0, - C_MPEC_MethodMembers, // The collection of all method members. - C_MPEC_DataMembers, // THe collection of all data members. - 0, - 0, - 0, - 0, - 0, - 0, - (initproc)C_MPEC_init, // Constructor. - 0, -}; - -static PyTypeObject C_MPCT_ClassInfo = -{ - PyVarObject_HEAD_INIT(nullptr, 0)"mpegCoder.MpegClient", // The implementation of the __class__.__name__. - sizeof(C_MpegClient), // The memory length of the class. This value is required for PyObject_New. - 0, - (destructor)C_MPCT_Destruct, // Destructor. - 0, - 0, - 0, - 0, - (reprfunc)C_MPCT_Repr, // __repr__ method. - 0, - 0, - 0, - 0, - 0, - (reprfunc)C_MPCT_Str, // __str__ method. 
- 0, - 0, - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // If no methods are provided, this value is Py_TPFLAGS_DEFAULE. - "This class has wrapped the C-API of FFmpeg demuxer so that users could call its methods\n to demux the network stream in python quickly.", // __doc__, the docstring of the class. - 0, - 0, - 0, - 0, - 0, - 0, - C_MPCT_MethodMembers, // The collection of all method members. - C_MPCT_DataMembers, // THe collection of all data members. - 0, - 0, - 0, - 0, - 0, - 0, - (initproc)C_MPCT_init, // Constructor. - 0, -}; - -static PyTypeObject C_MPSV_ClassInfo = -{ - PyVarObject_HEAD_INIT(nullptr, 0)"mpegCoder.MpegServer", // The implementation of the __class__.__name__. - sizeof(C_MpegServer), // The memory length of the class. This value is required for PyObject_New. - 0, - (destructor)C_MPSV_Destruct, // Destructor. - 0, - 0, - 0, - 0, - (reprfunc)C_MPSV_Repr, // __repr__ method. - 0, - 0, - 0, - 0, - 0, - (reprfunc)C_MPSV_Str, // __str__ method. - 0, - 0, - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // If no methods are provided, this value is Py_TPFLAGS_DEFAULE. - "This class has wrapped the C-API of FFmpeg stream server so that users could call its methods\n to server streamed frames by using numpy-data quickly.", // __doc__, the docstring of the class. - 0, - 0, - 0, - 0, - 0, - 0, - C_MPSV_MethodMembers, // The collection of all method members. - C_MPSV_DataMembers, // THe collection of all data members. - 0, - 0, - 0, - 0, - 0, - 0, - (initproc)C_MPSV_init, // Constructor. - 0, -}; - -/***************************************************************************** -* Decleartion of the module. -* This is the top-level packing of the module APIs. -*****************************************************************************/ -static PyModuleDef ModuleInfo = -{ - PyModuleDef_HEAD_INIT, - "mpegCoder", // The __name__ of the module. 
- "A FFmpeg module which could provide a class for encode/decode a video in any format.", // __doc__; The docstring of the module. - -1, - nullptr, nullptr, nullptr, nullptr, nullptr -}; - -#endif diff --git a/MpegCoder/MpegStreamer.cpp b/MpegCoder/MpegStreamer.cpp deleted file mode 100644 index 5be484e..0000000 --- a/MpegCoder/MpegStreamer.cpp +++ /dev/null @@ -1,2225 +0,0 @@ -#include "stdafx.h" - -#define NO_IMPORT_ARRAY -#define PY_ARRAY_UNIQUE_SYMBOL MPEGARRAY_API -#include -#include "MpegCoder.h" -#include "MpegStreamer.h" - -cmpc::CMpegClient::CMpegClient(void) : - videoPath(), width(0), height(0), widthDst(0), heightDst(0), - PPixelFormat(AVPixelFormat::AV_PIX_FMT_NONE), PFormatCtx(nullptr), PCodecCtx(nullptr), - PVideoStream(nullptr), frame(nullptr), PVideoStreamIDX(0), PVideoFrameCount(0), - buffer(), PswsCtx(nullptr), cache_size(0), read_size(0), - frameRate({ 0,0 }), read_handle(), read_check(), info_lock(), reading(false), - _str_codec(), _duration(0), _predictFrameNum(0), nthread(0), refcount(1) { -} -cmpc::CMpegClient::~CMpegClient(void) { - clear(); -} -cmpc::CMpegClient::CMpegClient(CMpegClient&& ref) noexcept : - videoPath(std::move(ref.videoPath)), width(ref.width), height(ref.height), - widthDst(ref.widthDst), heightDst(ref.heightDst), - PPixelFormat(ref.PPixelFormat), PFormatCtx(ref.PFormatCtx), PCodecCtx(ref.PCodecCtx), - PVideoStream(ref.PVideoStream), frame(ref.frame), - PVideoStreamIDX(ref.PVideoStreamIDX), PVideoFrameCount(ref.PVideoFrameCount), - buffer(std::move(ref.buffer)), PswsCtx(ref.PswsCtx), - cache_size(ref.cache_size), read_size(ref.read_size), - frameRate(ref.frameRate), read_handle(std::move(std::thread())), read_check(), info_lock(), - reading(ref.reading), _str_codec(std::move(ref._str_codec)), _duration(ref._duration), - _predictFrameNum(ref._predictFrameNum), nthread(ref.nthread), refcount(ref.refcount) { - ref.PFormatCtx = nullptr; - ref.PCodecCtx = nullptr; - ref.PVideoStream = nullptr; - ref.frame = nullptr; - 
ref.PswsCtx = nullptr; -} -cmpc::CMpegClient& cmpc::CMpegClient::operator=(CMpegClient&& ref) noexcept { - if (this != &ref) { - videoPath = std::move(ref.videoPath); - width = ref.width; - height = ref.height; - widthDst = ref.widthDst; - heightDst = ref.heightDst; - PPixelFormat = ref.PPixelFormat; - PVideoStreamIDX = ref.PVideoStreamIDX; - PVideoFrameCount = ref.PVideoFrameCount; - cache_size = ref.cache_size; - read_size = ref.read_size; - frameRate = ref.frameRate; - reading = ref.reading; - _duration = ref._duration; - _predictFrameNum = ref._predictFrameNum; - refcount = ref.refcount; - PFormatCtx = ref.PFormatCtx; - PCodecCtx = ref.PCodecCtx; - PVideoStream = ref.PVideoStream; - frame = ref.frame; - PswsCtx = ref.PswsCtx; - buffer = std::move(ref.buffer); - read_handle = std::move(std::thread()); - nthread = ref.nthread; - ref.PFormatCtx = nullptr; - ref.PCodecCtx = nullptr; - ref.PVideoStream = nullptr; - ref.frame = nullptr; - ref.PswsCtx = nullptr; - } - return *this; -} - -void cmpc::CMpegClient::meta_protected_clear(void) { - auto protectWidth = widthDst; - auto protectHeight = heightDst; - auto protectCacheSize = cache_size; - auto protectReadSize = read_size; - auto protectFrameRate = frameRate; - auto protectNthread = nthread; - clear(); - widthDst = protectWidth; - heightDst = protectHeight; - cache_size = protectCacheSize; - read_size = protectReadSize; - frameRate = protectFrameRate; - nthread = protectNthread; -} - -void cmpc::CMpegClient::clear(void) { - if (read_handle.joinable()) { - read_check.lock(); - reading = false; - read_check.unlock(); - read_handle.join(); - //std::terminate(); - read_handle = std::move(std::thread()); - } - else { - read_handle = std::move(std::thread()); - } - width = height = 0; - widthDst = heightDst = 0; - PPixelFormat = AVPixelFormat::AV_PIX_FMT_NONE; - PVideoStreamIDX = -1; - PVideoFrameCount = 0; - _duration = 0; - _predictFrameNum = 0; - _str_codec.clear(); - //videoPath.clear(); - buffer.clear(); - 
cache_size = 0; - read_size = 0; - frameRate = _setAVRational(0, 0); - read_check.lock(); - read_check.unlock(); - info_lock.lock(); - info_lock.unlock(); - nthread = 0; - PVideoStream = nullptr; - if (frame) { - av_frame_free(&frame); - frame = nullptr; - } - if (PswsCtx) { - sws_freeContext(PswsCtx); - PswsCtx = nullptr; - } - if (PCodecCtx) { - avcodec_free_context(&PCodecCtx); - PCodecCtx = nullptr; - } - if (PFormatCtx) { - avformat_close_input(&PFormatCtx); - PFormatCtx = nullptr; - } - refcount = 1; -} - -int cmpc::CMpegClient::_open_codec_context(int& stream_idx, AVCodecContext*& dec_ctx, \ - AVFormatContext* PFormatCtx, enum cmpc::AVMediaType type) { // Search the correct decoder, and make the configurations. - int ret; - - //search video stream - ret = av_find_best_stream(PFormatCtx, type, -1, -1, nullptr, 0); - if (ret < 0) { - cerr << "Could not find " << av_get_media_type_string(type) << \ - " stream in input address: '" << videoPath << "'" << endl; - return ret; - } - else { - auto stream_index = ret; - auto st = PFormatCtx->streams[stream_index]; // The AVStream object. - - /* find decoder for the stream */ - auto dec = avcodec_find_decoder(st->codecpar->codec_id); // Decoder (AVCodec). - if (!dec) { - cerr << "Failed to find " << av_get_media_type_string(type) << " codec" << endl; - return AVERROR(EINVAL); - } - _str_codec.assign(dec->name); - - /* Allocate a codec context for the decoder / Add this to allocate the context by codec */ - auto dec_ctx_ = avcodec_alloc_context3(dec); // Decoder context (AVCodecContext). 
- if (!dec_ctx_) { - cerr << "Failed to allocate the " << av_get_media_type_string(type) << " codec context" << endl; - return AVERROR(ENOMEM); - } - - if (nthread > 0) { - dec_ctx_->thread_count = nthread; - } - - /* Copy codec parameters from input stream to output codec context */ - if ((ret = avcodec_parameters_to_context(dec_ctx_, st->codecpar)) < 0) { - cerr << "Failed to copy " << av_get_media_type_string(type) << \ - " codec parameters to decoder context" << endl; - return ret; - } - - /* Init the decoders, with or without reference counting */ - AVDictionary* opts = nullptr; // The uninitialized argument dictionary. - av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0); - if ((ret = avcodec_open2(dec_ctx_, dec, &opts)) < 0) { - cerr << "Failed to open " << av_get_media_type_string(type) << " codec" << endl; - return ret; - } - dec_ctx = dec_ctx_; - stream_idx = stream_index; - } - return 0; -} - -bool cmpc::CMpegClient::__setup_check() const { - if (cache_size > 0 && read_size > 0 && frameRate.den > 0 && frameRate.num > 0 && (!read_handle.joinable())) { - return true; - } - else { - return false; - } -} - -bool cmpc::CMpegClient::FFmpegSetup(string inVideoPath) { - videoPath.assign(inVideoPath); - return FFmpegSetup(); -} - -bool cmpc::CMpegClient::FFmpegSetup() { - if (!__setup_check()) { - cerr << "Have not get necessary and correct configurations, so FFmpegSetup() should not be called." 
<< endl; - return false; - } - meta_protected_clear(); - - /* open Stream: register all formats and codecs */ - if (avformat_open_input(&PFormatCtx, videoPath.c_str(), nullptr, nullptr) < 0) { - cerr << "Could not open source address " << videoPath << endl; - clear(); - return false; - } // For example, "rtsp://localhost:8554/h264.3gp" - - /* retrieve stream information */ - if (avformat_find_stream_info(PFormatCtx, nullptr) < 0) { - cerr << "Could not find stream information" << endl; - clear(); - return false; - } - AVRational time_base, frame_base; - if (_open_codec_context(PVideoStreamIDX, PCodecCtx, PFormatCtx, AVMEDIA_TYPE_VIDEO) >= 0) { - PVideoStream = PFormatCtx->streams[PVideoStreamIDX]; - time_base = PVideoStream->time_base; - frame_base = PVideoStream->avg_frame_rate; - - /* allocate image where the decoded image will be put */ - width = PCodecCtx->width; - height = PCodecCtx->height; - if (widthDst <= 0) { - widthDst = width; - } - if (heightDst <= 0) { - heightDst = height; - } - PPixelFormat = PCodecCtx->pix_fmt; - _duration = static_cast(PVideoStream->duration) / static_cast(time_base.den) * static_cast(time_base.num); - _predictFrameNum = av_rescale(static_cast(_duration * 0xFFFF), frame_base.num, frame_base.den) / 0xFFFF; - } - else { - cerr << "Could not get codec context from the stream, aborting" << endl; - clear(); - return false; - } - - /* dump input information to stderr */ - if (__dumpControl > 1) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 0); - } - - if (!PVideoStream) { // Check whether the video stream is correctly opened. 
- cerr << "Could not find audio or video stream in the network, aborting" << endl; - clear(); - return false; - } - - if (width == 0 || height == 0) { - cerr << "Could not get enough meta-data in the network, aborting" << endl; - clear(); - return false; - } - - PswsCtx = sws_getContext(width, height, PCodecCtx->pix_fmt, widthDst, heightDst, AV_PIX_FMT_RGB24, - SCALE_FLAGS, nullptr, nullptr, nullptr); - - buffer.set(cache_size, width, height, widthDst, heightDst); - buffer.set_timer(frameRate, time_base); - if (!buffer.reset_memory()) { // Check whether the buffer is allocated correctly. - cerr << "Could not allocate the memory of frame buffer list." << endl; - clear(); - return false; - } - - read_check.lock(); - reading = true; - read_check.unlock(); - return true; -} - -void cmpc::CMpegClient::dumpFormat() { - if ((!videoPath.empty()) && PFormatCtx) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 0); - } - else { - cerr << "Still need to FFmpegSetup()" << endl; - } -} - -void cmpc::CMpegClient::resetPath(string inVideoPath) { - videoPath.assign(inVideoPath); -} - -cmpc::AVRational cmpc::CMpegClient::_setAVRational(int num, int den) { - AVRational res; - res.num = num; res.den = den; - return res; -} - -int cmpc::CMpegClient::__save_frame(AVFrame*& frame, AVPacket*& pkt, bool& got_frame, int cached) { - int ret = 0; - int decoded = pkt->size; - - got_frame = false; - - if (pkt->stream_index == PVideoStreamIDX) { - /* decode video frame */ - ret = __avcodec_decode_video2(PCodecCtx, frame, got_frame, pkt); - if (ret < 0) { - cout << "Error decoding video frame (" << av_err2str(ret) << ")" << endl; - return ret; - } - - if (got_frame) { - - if (frame->width != width || frame->height != height || - frame->format != PPixelFormat) { - /* To handle this change, one could call av_image_alloc again and - * decode the following frames into another rawvideo file. 
*/ - cout << "Error: Width, height and pixel format have to be " - "constant in a rawvideo file, but the width, height or " - "pixel format of the input video changed:\n" - "old: width = " << width << ", height = " << height << ", format = " - << av_get_pix_fmt_name(PPixelFormat) << endl << - "new: width = " << frame->width << ", height = " << frame->height << ", format = " - << av_get_pix_fmt_name(static_cast(frame->format)) << endl; - return -1; - } - - info_lock.lock(); - PVideoFrameCount++; - info_lock.unlock(); - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "video_frame" << (cached ? "(cached)" : "") << " n:" << PVideoFrameCount << - " coded_n:" << frame->coded_picture_number << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* copy decoded frame to destination buffer: - * this is required since rawvideo expects non aligned data */ - - buffer.write(PswsCtx, frame); - } - } - - /* If we use frame reference counting, we own the data and need - * to de-reference it when we don't use it anymore */ - - if (got_frame && refcount) - av_frame_unref(frame); - - return decoded; -} - -void cmpc::CMpegClient::__client_holder() { - int ret; - bool got_frame; - if (frame) { - cerr << "Current frame is occupied, could not start a new client." << endl; - return; - } - frame = av_frame_alloc(); - auto pkt = av_packet_alloc(); - if (!frame) { - cerr << "Could not allocate frame" << endl; - ret = AVERROR(ENOMEM); - return; - } - /* initialize packet, set data to NULL, let the demuxer fill it */ - if (PVideoStream && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "Demuxing video from address '" << videoPath << "' into Python-List" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - /* Reset the contex to remove the flushed state. 
*/ - avcodec_flush_buffers(PCodecCtx); - - /* read frames from the file */ - info_lock.lock(); - PVideoFrameCount = 0; - info_lock.unlock(); - - //start reading packets from stream and write them to file - av_read_play(PFormatCtx); //play RTSP - - auto temp_pkt = av_packet_alloc(); - while (av_read_frame(PFormatCtx, pkt) >= 0) { - //cout << "[Test - " << pkt.size << " ]" << endl; - av_packet_ref(temp_pkt, pkt); - do { - ret = __save_frame(frame, temp_pkt, got_frame, 0); - if (ret < 0) - break; - temp_pkt->data += ret; - temp_pkt->size -= ret; - } while (temp_pkt->size > 0); - /* flush cached frames */ - av_packet_unref(pkt); - av_packet_unref(temp_pkt); - read_check.lock(); - if (!reading) { - read_check.unlock(); - break; - } - else { - read_check.unlock(); - } - } - av_packet_free(&temp_pkt); - - do { - __save_frame(frame, pkt, got_frame, 1); - } while (got_frame); - - //cout << "Demuxing succeeded." << endl; - - if (PVideoStream && (__dumpControl > 0)) { - std::ostringstream str_data; - str_data << "End of stream client." << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - if (frame) { - av_frame_free(&frame); - frame = nullptr; - } - if (pkt) { - av_packet_free(&pkt); - } - - read_check.lock(); - reading = false; - read_check.unlock(); -} - -int cmpc::CMpegClient::__avcodec_decode_video2(AVCodecContext* avctx, AVFrame* frame, bool& got_frame, AVPacket* pkt) { - int ret; - - got_frame = false; - - if (pkt) { - ret = avcodec_send_packet(avctx, pkt); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - //cout << ret << ", " << AVERROR(EAGAIN) << ", " << AVERROR_EOF << endl; - return ret == AVERROR_EOF ? 
0 : ret; - } - } - - ret = avcodec_receive_frame(avctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) - return ret; - if (ret >= 0) - got_frame = true; - - //cout << ret << ", " << AVERROR(EAGAIN) << ", " << AVERROR_EOF << endl; - - return 0; -} - -PyObject* cmpc::CMpegClient::ExtractFrame() { - return ExtractFrame(read_size); -} - -PyObject* cmpc::CMpegClient::ExtractFrame(int64_t readsize) { - if (readsize == 0 || readsize > cache_size) { - cerr << "Read size of frames is out of range." << endl; - return nullptr; - } - else if (frame == nullptr) { - cerr << "Current frame object is empty, maybe the client has not been started." << endl; - return nullptr; - } - buffer.freeze_write(readsize); - auto res = buffer.read(); - if (res == nullptr) { - cerr << "Unable to get frames from current buffer." << endl; - } - return res; -} - -void cmpc::CMpegClient::setParameter(string keyword, void* ptr) { - if (keyword.compare("widthDst") == 0) { - auto ref = reinterpret_cast(ptr); - widthDst = *ref; - } - else if (keyword.compare("heightDst") == 0) { - auto ref = reinterpret_cast(ptr); - heightDst = *ref; - } - else if (keyword.compare("cacheSize") == 0) { - auto ref = reinterpret_cast(ptr); - cache_size = *ref; - } - else if (keyword.compare("readSize") == 0) { - auto ref = reinterpret_cast(ptr); - read_size = *ref; - } - else if (keyword.compare("dstFrameRate") == 0) { - PyObject* ref = reinterpret_cast(ptr); - auto refObj = PyTuple_GetItem(ref, 0); - int num = static_cast(PyLong_AsLong(refObj)); - refObj = PyTuple_GetItem(ref, 1); - int den = static_cast(PyLong_AsLong(refObj)); - frameRate = _setAVRational(num, den); - } - else if (keyword.compare("nthread") == 0) { - auto ref = reinterpret_cast(ptr); - if (PCodecCtx) { - PCodecCtx->thread_count = *ref; - } - nthread = *ref; - } -} - -PyObject* cmpc::CMpegClient::getParameter(string keyword) { - if (keyword.compare("videoAddress") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(videoPath.c_str(), 
static_cast(videoPath.size())); - } - else if (keyword.compare("width") == 0) { - return Py_BuildValue("i", width); - } - else if (keyword.compare("height") == 0) { - return Py_BuildValue("i", height); - } - else if (keyword.compare("frameCount") == 0) { - info_lock.lock(); - auto value = Py_BuildValue("i", PVideoFrameCount); - info_lock.unlock(); - return value; - } - else if (keyword.compare("coderName") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(_str_codec.c_str(), static_cast(_str_codec.size())); - } - else if (keyword.compare("duration") == 0) { - return Py_BuildValue("d", _duration); - } - else if (keyword.compare("estFrameNum") == 0) { - return Py_BuildValue("L", _predictFrameNum); - } - else if (keyword.compare("srcFrameRate") == 0) { - if (!PVideoStream) { - return Py_BuildValue("d", 0.0); - } - auto frame_base = PVideoStream->avg_frame_rate; - double srcFrameRate = static_cast(frame_base.num) / static_cast(frame_base.den); - return Py_BuildValue("d", srcFrameRate); - } - else if (keyword.compare("nthread") == 0) { - if (PCodecCtx) { - return Py_BuildValue("i", PCodecCtx->thread_count); - } - else { - return Py_BuildValue("i", nthread); - } - } - else { - Py_RETURN_NONE; - } -} - -PyObject* cmpc::CMpegClient::getParameter() { - auto res = PyDict_New(); - string key; - PyObject* val = nullptr; - // Fill the values. 
- key.assign("videoAddress"); - val = Py_BuildValue("y", videoPath.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("codecName"); - val = Py_BuildValue("y", _str_codec.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PCodecCtx) { - key.assign("bitRate"); - val = Py_BuildValue("L", PCodecCtx->bit_rate); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("GOPSize"); - val = Py_BuildValue("i", PCodecCtx->gop_size); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("maxBframe"); - val = Py_BuildValue("i", PCodecCtx->max_b_frames); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("nthread"); - val = Py_BuildValue("i", PCodecCtx->thread_count); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - else { - key.assign("nthread"); - val = Py_BuildValue("i", nthread); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (widthDst > 0) { - key.assign("widthDst"); - val = Py_BuildValue("i", widthDst); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (heightDst > 0) { - key.assign("heightDst"); - val = Py_BuildValue("i", heightDst); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - key.assign("width"); - val = Py_BuildValue("i", width); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("height"); - val = Py_BuildValue("i", height); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PVideoStream) { - key.assign("frameRate"); - auto& frame_rate = PVideoStream->avg_frame_rate; - val = Py_BuildValue("(ii)", frame_rate.num, frame_rate.den); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - return res; -} - -bool cmpc::CMpegClient::start() { - if (reading && (frame == nullptr)) { - read_handle = std::move(std::thread(std::mem_fn(&CMpegClient::__client_holder), std::ref(*this))); - 
return true; - } - return false; -} -void cmpc::CMpegClient::terminate() { - read_check.lock(); - auto protectReading = reading; - read_check.unlock(); - if (read_handle.joinable()) { - read_check.lock(); - reading = false; - read_check.unlock(); - read_handle.join(); - //std::terminate(); - read_handle = std::move(std::thread()); - } - else { - read_handle = std::move(std::thread()); - } - info_lock.lock(); - info_lock.unlock(); - read_check.lock(); - reading = protectReading; - read_check.unlock(); - if (frame) { - av_frame_free(&frame); - } -} -ostream& cmpc::operator<<(ostream& out, cmpc::CMpegClient& self_class) { - double dstFrameRate; - out << std::setw(1) << "/"; - out << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setw(1) << " * Packed FFmpeg Client - Y. Jin V" << MPEGCODER_CURRENT_VERSION << endl; - out << " " << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * VideoAddress: " \ - << self_class.videoPath << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (Width, Height): " \ - << self_class.width << ", " << self_class.height << endl; - if (self_class.widthDst > 0 && self_class.heightDst > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (WidthDst, HeightDst): " \ - << self_class.widthDst << ", " << self_class.heightDst << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Deccoder: " \ - << self_class._str_codec << endl; - if (self_class.PCodecCtx) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number: " \ - << self_class.PCodecCtx->thread_count << endl; - } - else { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number (P): " \ - << self_class.nthread << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Duration: " \ - << self_class._duration << " [s]" << endl; - out << 
std::setiosflags(std::ios::left) << std::setw(25) << " * Predicted FrameNum: " \ - << self_class._predictFrameNum << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Read/Cache size: " \ - << self_class.read_size << "/" << self_class.cache_size << endl; - if (self_class.PVideoStream) { - auto frame_base = self_class.PVideoStream->avg_frame_rate; - double srcFrameRate = static_cast(frame_base.num) / static_cast(frame_base.den); - if (self_class.frameRate.den) { - dstFrameRate = static_cast(self_class.frameRate.num) / static_cast(self_class.frameRate.den); - } - else { - dstFrameRate = 0; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Dst./Src. frame rate: " \ - << std::setprecision(3) << dstFrameRate << "/" << srcFrameRate << std::setprecision(6) << endl; - } - else { - if (self_class.frameRate.den) { - dstFrameRate = static_cast(self_class.frameRate.num) / static_cast(self_class.frameRate.den); - } - else { - dstFrameRate = 0; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Src. 
frame rate: " \ - << std::setprecision(3) << dstFrameRate << std::setprecision(6) << endl; - } - out << std::setw(1) << " */"; - return out; -} - -cmpc::BufferList::BufferList(void) : - _Buffer_pos(0), _Buffer_rpos(-1), _Buffer_size(0), __Read_size(0), - next_pts(0), interval_pts(0), dst_width(0), dst_height(0), - src_width(0), src_height(0), _Buffer_capacity(0), - frameRGB(nullptr), _Buffer_List(nullptr) { -} -cmpc::BufferList::~BufferList(void) { - if (_Buffer_List) { - for (auto i = 0; i < _Buffer_size; i++) { - if (_Buffer_List[i]) { - av_free(_Buffer_List[i]); - _Buffer_List[i] = nullptr; - } - } - delete[]_Buffer_List; - _Buffer_List = nullptr; - } - if (frameRGB) { - av_frame_free(&frameRGB); - } -} -cmpc::BufferList::BufferList(const BufferList& ref) : - _Buffer_pos(ref._Buffer_pos), _Buffer_rpos(ref._Buffer_rpos), _Buffer_size(ref._Buffer_size), - __Read_size(ref.__Read_size), next_pts(ref.next_pts), interval_pts(ref.interval_pts), - dst_width(ref.dst_width), dst_height(ref.dst_height), - src_width(ref.src_width), src_height(ref.src_height), - _Buffer_capacity(ref._Buffer_capacity), frameRGB(ref.frameRGB), _Buffer_List(nullptr) { - if (!(frameRGB = av_frame_alloc())) { - cerr << "Could Allocate Temp Frame (RGB)" << endl; - return; - } - _Buffer_List = new uint8_t * [_Buffer_size]; - memset(_Buffer_List, 0, _Buffer_size * sizeof(uint8_t*)); - if (_Buffer_capacity > 0) { - for (auto i = 0; i < _Buffer_size; i++) { - if (ref._Buffer_List[i] != nullptr) { - _Buffer_List[i] = (uint8_t*)av_malloc(_Buffer_capacity * sizeof(uint8_t)); - memcpy(_Buffer_List[i], ref._Buffer_List[i], _Buffer_capacity * sizeof(uint8_t)); - } - } - } -} -cmpc::BufferList& cmpc::BufferList::operator=(const BufferList& ref) { - if (this != &ref) { - _Buffer_pos = ref._Buffer_pos; - _Buffer_rpos = ref._Buffer_rpos; - _Buffer_size = ref._Buffer_size; - __Read_size = ref.__Read_size; - next_pts = ref.next_pts; - interval_pts = ref.interval_pts; - dst_width = ref.dst_width; - dst_height = 
ref.dst_height; - src_width = ref.src_width; - src_height = ref.src_height; - _Buffer_capacity = ref._Buffer_capacity; - if (!(frameRGB = av_frame_alloc())) { - cerr << "Could Allocate Temp Frame (RGB)" << endl; - return *this; - } - _Buffer_List = new uint8_t * [_Buffer_size]; - memset(_Buffer_List, 0, _Buffer_size * sizeof(uint8_t*)); - if (_Buffer_capacity > 0) { - for (auto i = 0; i < _Buffer_size; i++) { - if (ref._Buffer_List[i] != nullptr) { - _Buffer_List[i] = (uint8_t*)av_malloc(_Buffer_capacity * sizeof(uint8_t)); - memcpy(_Buffer_List[i], ref._Buffer_List[i], _Buffer_capacity * sizeof(uint8_t)); - } - } - } - } - return *this; -} -cmpc::BufferList::BufferList(BufferList&& ref) noexcept : - _Buffer_pos(ref._Buffer_pos), _Buffer_rpos(ref._Buffer_rpos), _Buffer_size(ref._Buffer_size), - __Read_size(ref.__Read_size), next_pts(ref.next_pts), interval_pts(ref.interval_pts), - dst_width(ref.dst_width), dst_height(ref.dst_height), - src_width(ref.src_width), src_height(ref.src_height), - _Buffer_capacity(ref._Buffer_capacity), frameRGB(ref.frameRGB), _Buffer_List(ref._Buffer_List) { - ref._Buffer_List = nullptr; - ref.frameRGB = nullptr; -} -cmpc::BufferList& cmpc::BufferList::operator=(BufferList&& ref) noexcept { - if (this != &ref) { - _Buffer_pos = ref._Buffer_pos; - _Buffer_rpos = ref._Buffer_rpos; - _Buffer_size = ref._Buffer_size; - __Read_size = ref.__Read_size; - interval_pts = ref.interval_pts; - next_pts = ref.next_pts; - dst_width = ref.dst_width; - dst_height = ref.dst_height; - src_width = ref.src_width; - src_height = ref.src_height; - _Buffer_capacity = ref._Buffer_capacity; - _Buffer_List = ref._Buffer_List; - frameRGB = ref.frameRGB; - ref._Buffer_List = nullptr; - ref.frameRGB = nullptr; - } - return *this; -} -void cmpc::BufferList::clear(void) { - if (_Buffer_List) { - for (auto i = 0; i < _Buffer_size; i++) { - if (_Buffer_List[i]) { - av_free(_Buffer_List[i]); - _Buffer_List[i] = nullptr; - } - } - delete[]_Buffer_List; - _Buffer_List = 
nullptr; - } - _Buffer_pos = 0; - _Buffer_rpos = -1; - _Buffer_size = 0; - __Read_size = 0; - next_pts = 0; - interval_pts = 0; - src_width = 0; - src_height = 0; - dst_width = 0; - dst_height = 0; - if (frameRGB) { - av_frame_free(&frameRGB); - } -} -const int64_t cmpc::BufferList::size() const { - return _Buffer_size; -} -void cmpc::BufferList::set(int64_t set_size, int width, int height, int widthDst, int heightDst) { - _Buffer_size = set_size; - if (widthDst != 0) { - dst_width = widthDst; - } - else { - dst_width = width; - } - if (heightDst != 0) { - dst_height = heightDst; - } - else { - dst_height = height; - } - src_width = width; - src_height = height; - _Buffer_capacity = av_image_get_buffer_size(AV_PIX_FMT_RGB24, dst_width, dst_height, 1); -} -void cmpc::BufferList::set_timer(AVRational targetFrameRate, AVRational timeBase) { - interval_pts = av_rescale(av_rescale(1, timeBase.den, timeBase.num), targetFrameRate.den, targetFrameRate.num); -} -bool cmpc::BufferList::reset_memory() { - if (!frameRGB) { - if (!(frameRGB = av_frame_alloc())) { - cerr << "Could Allocate Temp Frame (RGB)" << endl; - return false; - } - } - if (!_Buffer_List) { - _Buffer_List = new uint8_t * [_Buffer_size]; - memset(_Buffer_List, 0, _Buffer_size * sizeof(uint8_t*)); - } - for (auto i = 0; i < _Buffer_size; i++) { - if (!_Buffer_List[i]) { - _Buffer_List[i] = (uint8_t*)av_malloc(_Buffer_capacity * sizeof(uint8_t)); - } - memset(_Buffer_List[i], 0, _Buffer_capacity * sizeof(uint8_t)); - } - return true; -} -void cmpc::BufferList::freeze_write(int64_t read_size) { - auto read_pos = _Buffer_pos - read_size; - if (read_pos < 0) { - read_pos += _Buffer_size; - } - _Buffer_rpos = read_pos; - __Read_size = read_size; -} -bool cmpc::BufferList::write(SwsContext* PswsCtx, AVFrame* frame) { - if (frame->pts < next_pts) { - if (frame->pts > (next_pts - 2 * interval_pts)) { - return false; - } - else { - next_pts = frame->pts + interval_pts; - } - } - else { - if (next_pts > 0) - next_pts 
+= interval_pts; - else - next_pts = frame->pts; - } - if (_Buffer_pos == _Buffer_rpos) { - return false; - } - av_image_fill_arrays(frameRGB->data, frameRGB->linesize, _Buffer_List[_Buffer_pos], AV_PIX_FMT_RGB24, dst_width, dst_height, 1); - sws_scale(PswsCtx, frame->data, frame->linesize, 0, src_height, frameRGB->data, frameRGB->linesize); - _Buffer_pos++; - if (_Buffer_pos >= _Buffer_size) - _Buffer_pos -= _Buffer_size; - return true; -} -PyObject* cmpc::BufferList::read() { - if (_Buffer_rpos < 0) { - return nullptr; - } - auto _Buffer_rend = (_Buffer_rpos + __Read_size) % _Buffer_size; - npy_intp dims[] = { __Read_size, dst_height, dst_width, 3 }; - auto newdata = new uint8_t[__Read_size * _Buffer_capacity]; - auto p = newdata; - for (auto i = _Buffer_rpos; i != _Buffer_rend; i = (i + 1) % _Buffer_size) { - memcpy(p, _Buffer_List[i], _Buffer_capacity * sizeof(uint8_t)); - p += _Buffer_capacity; - } - PyObject* PyFrame = PyArray_SimpleNewFromData(4, dims, NPY_UINT8, reinterpret_cast(newdata)); - PyArray_ENABLEFLAGS((PyArrayObject*)PyFrame, NPY_ARRAY_OWNDATA); - _Buffer_rpos = -1; - __Read_size = 0; - return PyArray_Return((PyArrayObject*)PyFrame); - //Py_RETURN_NONE; -} - -/** - * Related with the encoder. - */ - - // Constructors following 3-5 law. 
-cmpc::CMpegServer::CMpegServer(void) : - videoPath(), __formatName(), codecName(), bitRate(1024), - __start_time(0), __cur_time(0), width(100), height(100), widthSrc(0), heightSrc(0), - timeBase(_setAVRational(1, 25)), frameRate(_setAVRational(25, 1)), - time_base_q(_setAVRational(1, AV_TIME_BASE)), GOPSize(10), MaxBFrame(1), - PStreamContex({ 0 }), PFormatCtx(nullptr), Ppacket(nullptr), PswsCtx(nullptr), - __frameRGB(nullptr), RGBbuffer(nullptr), __have_video(false), __enable_header(false), - nthread(0) { - __pts_ahead = av_rescale(av_rescale(20, timeBase.den, timeBase.num), frameRate.den, frameRate.num); -} - -void cmpc::CMpegServer::meta_protected_clear(void) { - auto protectWidth = width; - auto protectHeight = height; - auto protectWidthSrc = widthSrc; - auto protectHeightSrc = heightSrc; - auto protectBitRate = bitRate; - auto protectGOPSize = GOPSize; - auto protectMaxBFrame = MaxBFrame; - auto protectPTSAhead = __pts_ahead; - auto protectVideoPath(videoPath); - auto protectFormatName(__formatName); - auto protectCodecName(codecName); - auto protectTimeBase(timeBase); - auto protectFrameRate(frameRate); - auto protectNthread = nthread; - clear(); - width = protectWidth; - height = protectHeight; - widthSrc = protectWidthSrc; - heightSrc = protectHeightSrc; - bitRate = protectBitRate; - GOPSize = protectGOPSize; - MaxBFrame = protectMaxBFrame; - timeBase = protectTimeBase; - frameRate = protectFrameRate; - __pts_ahead = protectPTSAhead; - videoPath.assign(protectVideoPath); - __formatName.assign(protectFormatName); - codecName.assign(protectCodecName); - nthread = protectNthread; -} - -void cmpc::CMpegServer::clear(void) { - FFmpegClose(); - videoPath.clear(); - __formatName.clear(); - codecName.clear(); - bitRate = 1024; - width = 100; - height = 100; - heightSrc = 0; - widthSrc = 0; - timeBase = _setAVRational(1, 25); - frameRate = _setAVRational(25, 1); - GOPSize = 10; - MaxBFrame = 1; - nthread = 0; - PStreamContex = { 0 }; - __have_video = false; - 
__enable_header = false; - __pts_ahead = av_rescale(av_rescale(20, timeBase.den, timeBase.num), frameRate.den, frameRate.num); - __start_time = 0; - __cur_time = 0; -} - -cmpc::CMpegServer::~CMpegServer(void) { - clear(); -} - - -cmpc::CMpegServer::CMpegServer(const CMpegServer& ref) : - videoPath(ref.videoPath), __formatName(ref.__formatName), codecName(ref.codecName), - bitRate(ref.bitRate), __pts_ahead(ref.__pts_ahead), __start_time(0), __cur_time(0), - width(ref.width), height(ref.height), widthSrc(ref.widthSrc), heightSrc(ref.heightSrc), - timeBase(ref.timeBase), frameRate(ref.frameRate), - time_base_q(_setAVRational(1, AV_TIME_BASE)), GOPSize(ref.GOPSize), MaxBFrame(ref.MaxBFrame), - PStreamContex({ 0 }), PFormatCtx(nullptr), Ppacket(nullptr), PswsCtx(nullptr), - __frameRGB(nullptr), RGBbuffer(nullptr), __have_video(false), __enable_header(false), - nthread(ref.nthread) { - if (!FFmpegSetup()) { - clear(); - } -} - -cmpc::CMpegServer& cmpc::CMpegServer::operator=(const CMpegServer& ref) { - if (this != &ref) { - videoPath = ref.videoPath; - __formatName = ref.__formatName; - codecName = ref.codecName; - bitRate = ref.bitRate; - __pts_ahead = ref.__pts_ahead; - __start_time = 0; - __cur_time = 0; - width = ref.width; - height = ref.height; - widthSrc = ref.widthSrc; - heightSrc = ref.heightSrc; - timeBase = ref.timeBase; - frameRate = ref.frameRate; - time_base_q = _setAVRational(1, AV_TIME_BASE); - GOPSize = ref.GOPSize; - MaxBFrame = ref.MaxBFrame; - PStreamContex = { 0 }; - PFormatCtx = nullptr; - Ppacket = nullptr; - PswsCtx = nullptr; - __frameRGB = nullptr; - RGBbuffer = nullptr; - __have_video = false; - __enable_header = false; - nthread = ref.nthread; - if (!FFmpegSetup()) { - clear(); - } - } - return *this; -} - -cmpc::CMpegServer::CMpegServer(CMpegServer&& ref) noexcept : - videoPath(std::move(ref.videoPath)), __formatName(std::move(ref.__formatName)), - codecName(std::move(ref.codecName)), bitRate(ref.bitRate), __pts_ahead(ref.__pts_ahead), - 
__start_time(ref.__start_time), __cur_time(ref.__cur_time), - width(ref.width), height(ref.height), widthSrc(ref.widthSrc), heightSrc(ref.heightSrc), - timeBase(ref.timeBase), frameRate(ref.frameRate), time_base_q(ref.time_base_q), - GOPSize(ref.GOPSize), MaxBFrame(ref.MaxBFrame), PStreamContex(std::move(ref.PStreamContex)), - PFormatCtx(ref.PFormatCtx), Ppacket(ref.Ppacket), PswsCtx(ref.PswsCtx), - __frameRGB(ref.__frameRGB), RGBbuffer(ref.RGBbuffer), - __have_video(ref.__have_video), __enable_header(ref.__enable_header), nthread(ref.nthread) { - ref.PFormatCtx = nullptr; - ref.PStreamContex = { 0 }; - ref.PswsCtx = nullptr; - ref.RGBbuffer = nullptr; - ref.Ppacket = nullptr; - ref.__frameRGB = nullptr; -} - -cmpc::CMpegServer& cmpc::CMpegServer::operator=(CMpegServer&& ref) noexcept { - if (this != &ref) { - videoPath.assign(std::move(ref.videoPath)); - __formatName.assign(std::move(ref.__formatName)); - codecName.assign(std::move(ref.codecName)); - bitRate = ref.bitRate; - width = ref.width; - height = ref.height; - widthSrc = ref.widthSrc; - heightSrc = ref.heightSrc; - timeBase = ref.timeBase; - frameRate = ref.frameRate; - time_base_q = ref.time_base_q; - GOPSize = ref.GOPSize; - MaxBFrame = ref.MaxBFrame; - __pts_ahead = ref.__pts_ahead; - __start_time = ref.__start_time; - __cur_time = ref.__cur_time; - PFormatCtx = ref.PFormatCtx; - PStreamContex = std::move(ref.PStreamContex); - PswsCtx = ref.PswsCtx; - RGBbuffer = ref.RGBbuffer; - Ppacket = ref.Ppacket; - nthread = ref.nthread; - __frameRGB = ref.__frameRGB; - __have_video = ref.__have_video; - __enable_header = ref.__enable_header; - ref.PFormatCtx = nullptr; - ref.PStreamContex = { 0 }; - ref.PswsCtx = nullptr; - ref.RGBbuffer = nullptr; - ref.Ppacket = nullptr; - ref.__frameRGB = nullptr; - } - return *this; -} - -void cmpc::CMpegServer::resetPath(string inVideoPath) { - videoPath.assign(inVideoPath); - if (videoPath.compare(0, 7, "rtsp://") == 0) { - __formatName.assign("rtsp"); - } - else if 
(videoPath.compare(0, 7, "rtmp://") == 0) { - __formatName.assign("rtmp"); - } - else if (videoPath.compare(0, 7, "http://") == 0) { - __formatName.assign("http"); - } - else if (videoPath.compare(0, 6, "ftp://") == 0) { - __formatName.assign("ftp"); - } - else if (videoPath.compare(0, 7, "sftp://") == 0) { - __formatName.assign("sftp"); - } - else { - __formatName.clear(); - } -} - -bool cmpc::CMpegServer::FFmpegSetup(string inVideoPath) { - resetPath(inVideoPath); - return FFmpegSetup(); -} - -cmpc::AVRational cmpc::CMpegServer::_setAVRational(int num, int den) { - AVRational res; - res.num = num; res.den = den; - return res; -} - -int64_t cmpc::CMpegServer::__FrameToPts(int64_t seekFrame) const { - return av_rescale(av_rescale(seekFrame, timeBase.den, timeBase.num), frameRate.den, frameRate.num); -} - -int64_t cmpc::CMpegServer::__TimeToPts(double seekTime) const { - return av_rescale(static_cast(seekTime * 1000), timeBase.den, timeBase.num) / 1000; -} - -bool cmpc::CMpegServer::__setup_check() const { - if ((!videoPath.empty()) && (!__formatName.empty()) && frameRate.den > 0 && frameRate.num > 0) { - return true; - } - else { - return false; - } -} - -void cmpc::CMpegServer::__log_packet() { - AVRational* time_base = &PFormatCtx->streams[Ppacket->stream_index]->time_base; - std::ostringstream str_data; - str_data << "pts:" << av_ts2str(Ppacket->pts) << " pts_time:" << av_ts2timestr(Ppacket->pts, time_base) - << " dts:" << av_ts2str(Ppacket->dts) << " dts_time:" << av_ts2timestr(Ppacket->dts, time_base) << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); -} - -int cmpc::CMpegServer::__write_frame() { - /* rescale output packet timestamp values from codec to stream timebase */ - av_packet_rescale_ts(Ppacket, PStreamContex.enc->time_base, PStreamContex.st->time_base); - Ppacket->stream_index = PStreamContex.st->index; - - // Update the time cursor according to the packet index. 
- AVRational& time_base = PFormatCtx->streams[Ppacket->stream_index]->time_base; - - auto cur_time = av_rescale_q(Ppacket->pts, time_base, time_base_q); - if (cur_time > __cur_time) { - __cur_time = cur_time; - } - - /* Write the compressed frame to the media file. */ - if (__dumpControl > 0) - __log_packet(); - return av_interleaved_write_frame(PFormatCtx, Ppacket); -} - -/* Add an output stream. */ -const cmpc::AVCodec* cmpc::CMpegServer::__add_stream() { - /* find the encoder */ - AVCodecID codec_id; - auto srcwidth = widthSrc > 0 ? widthSrc : width; - auto srcheight = heightSrc > 0 ? heightSrc : height; - auto const_codec = avcodec_find_encoder_by_name(codecName.c_str()); - const AVCodec* codec; - if (!(const_codec)) { - codec_id = PFormatCtx->oformat->video_codec; - cerr << "Could not find encoder " << codecName << ", use " << avcodec_get_name(codec_id) << " as an alternative." << endl; - codec = avcodec_find_encoder(codec_id); - } - else { - codec = const_codec; - codec_id = codec->id; - } - - if (!codec) { - cerr << "Could not find encoder for '" << avcodec_get_name(codec_id) << "'" << endl; - return nullptr; - } - - PStreamContex.st = avformat_new_stream(PFormatCtx, nullptr); - if (!PStreamContex.st) { - cerr << "Could not allocate stream" << endl; - return nullptr; - } - PStreamContex.st->id = PFormatCtx->nb_streams - 1; - auto c = avcodec_alloc_context3(codec); - if (!c) { - cerr << "Could not alloc an encoding context" << endl; - return nullptr; - } - if (nthread > 0) { - c->thread_count = nthread; - } - PStreamContex.enc = c; - - switch (codec->type) { - case AVMediaType::AVMEDIA_TYPE_VIDEO: - c->codec_id = codec_id; - - c->bit_rate = bitRate; - /* Resolution must be a multiple of two. */ - c->width = width; - c->height = height; - /* timebase: This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. 
For fixed-fps content, - * timebase should be 1/framerate and timestamp increments should be - * identical to 1. */ - PStreamContex.st->time_base.den = 0; - PStreamContex.st->time_base.num = 0; - //av_stream_set_r_frame_rate(PStreamContex.st, frameRate); - //cout << "(" << frameRate.num << ", " << frameRate.den << ")" << endl; - //PStreamContex.st->r_frame_rate - c->time_base = timeBase; - - //PStreamContex.st->frame - c->framerate = frameRate; - - c->gop_size = GOPSize; /* emit one intra frame every twelve frames at most */ - c->max_b_frames = MaxBFrame; - c->pix_fmt = STREAM_PIX_FMT; - if (c->codec_id == AVCodecID::AV_CODEC_ID_FLV1) { - /* just for testing, we also add B-frames */ - c->max_b_frames = 0; - } - if (c->codec_id == AVCodecID::AV_CODEC_ID_MPEG2VIDEO) { - /* just for testing, we also add B-frames */ - c->max_b_frames = 2; - } - if (c->codec_id == AVCodecID::AV_CODEC_ID_MPEG1VIDEO) { - /* Needed to avoid using macroblocks in which some coeffs overflow. - * This does not happen with normal video, it just happens here as - * the motion of the chroma plane does not match the luma plane. 
*/ - c->mb_decision = 2; - } - if (c->pix_fmt != STREAM_PIX_FMT) { - /* as we only generate a YUV420P picture, we must convert it - * to the codec pixel format if needed */ - if (!PStreamContex.sws_ctx) { - PStreamContex.sws_ctx = sws_getContext(c->width, c->height, - STREAM_PIX_FMT, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PStreamContex.sws_ctx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - } - if (!PswsCtx) { - PswsCtx = sws_getContext(srcwidth, srcheight, - AVPixelFormat::AV_PIX_FMT_RGB24, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PswsCtx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - if (!RGBbuffer) { - auto numBytes = av_image_get_buffer_size(AVPixelFormat::AV_PIX_FMT_RGB24, srcwidth, srcheight, 1); - RGBbuffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); - } - break; - - default: - break; - } - - /* Some formats want stream headers to be separate. */ - if (PFormatCtx->oformat->flags & AVFMT_GLOBALHEADER) - c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; - return codec; -} - -/* video output */ -cmpc::AVFrame* cmpc::CMpegServer::__alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) { - auto picture = av_frame_alloc(); - if (!picture) - return nullptr; - picture->format = pix_fmt; - picture->width = width; - picture->height = height; - /* allocate the buffers for the frame data */ - auto ret = av_frame_get_buffer(picture, 32); - if (ret < 0) { - cerr << "Could not allocate frame data." 
<< endl; - return nullptr; - } - return picture; -} - -bool cmpc::CMpegServer::__open_video(const AVCodec* codec, const AVDictionary* opt_arg) { - int ret; - auto c = PStreamContex.enc; - AVDictionary* opt = nullptr; - - av_dict_copy(&opt, opt_arg, 0); - /* open the codec */ - ret = avcodec_open2(c, codec, &opt); - av_dict_free(&opt); - if (ret < 0) { - cerr << "Could not open video codec: " << av_err2str(ret) << endl; - return false; - } - /* allocate and init a re-usable frame */ - PStreamContex.frame = __alloc_picture(c->pix_fmt, c->width, c->height); - if (!PStreamContex.frame) { - cerr << "Could not allocate video frame" << endl; - return false; - } - /* If the output format is not YUV420P, then a temporary YUV420P - * picture is needed too. It is then converted to the required - * output format. */ - PStreamContex.tmp_frame = nullptr; - if (c->pix_fmt != STREAM_PIX_FMT) { - PStreamContex.tmp_frame = __alloc_picture(STREAM_PIX_FMT, c->width, c->height); - if (!PStreamContex.tmp_frame) { - cerr << "Could not allocate temporary picture" << endl; - return false; - } - } - /* copy the stream parameters to the muxer */ - ret = avcodec_parameters_from_context(PStreamContex.st->codecpar, c); - if (ret < 0) { - cerr << "Could not copy the stream parameters" << endl; - return false; - } - return true; -} - -cmpc::AVFrame* cmpc::CMpegServer::__get_video_frame(PyArrayObject* PyFrame) { - auto c = PStreamContex.enc; - - /* check if we want to generate more frames */ - //if (av_compare_ts(PStreamContex.next_pts, c->time_base, STREAM_DURATION, { 1, 1 }) >= 0) - // return nullptr; - /* when we pass a frame to the encoder, it may keep a reference to it - * internally; make sure we do not overwrite it here */ - if (av_frame_make_writable(PStreamContex.frame) < 0) - return nullptr; - if (c->pix_fmt != STREAM_PIX_FMT) { - /* as we only generate a YUV420P picture, we must convert it - * to the codec pixel format if needed */ - if (!PStreamContex.sws_ctx) { - PStreamContex.sws_ctx 
= sws_getContext(c->width, c->height, - STREAM_PIX_FMT, - c->width, c->height, - c->pix_fmt, - SCALE_FLAGS, nullptr, nullptr, nullptr); - if (!PStreamContex.sws_ctx) { - cerr << "Could not initialize the conversion context" << endl; - return nullptr; - } - } - if (!_LoadFrame_castFromPyFrameArray(PStreamContex.tmp_frame, PyFrame)) { - return nullptr; - } - sws_scale(PStreamContex.sws_ctx, - (const uint8_t* const*)PStreamContex.tmp_frame->data, PStreamContex.tmp_frame->linesize, - 0, c->height, PStreamContex.frame->data, PStreamContex.frame->linesize); - } - else { - if (!_LoadFrame_castFromPyFrameArray(PStreamContex.frame, PyFrame)) { - return nullptr; - } - } - - PStreamContex.frame->pts = PStreamContex.next_frame; - PStreamContex.next_frame++; - return PStreamContex.frame; -} - -bool cmpc::CMpegServer::_LoadFrame_castFromPyFrameArray(AVFrame* frame, PyArrayObject* PyFrame) { - /* make sure the frame data is writable */ - if (!__frameRGB) { - cerr << "Could not allocate frameRGB" << endl; - return false; - } - auto out_dataptr = reinterpret_cast(PyArray_DATA(PyFrame)); - auto srcwidth = widthSrc > 0 ? widthSrc : width; - auto srcheight = heightSrc > 0 ? 
heightSrc : height; - memcpy(RGBbuffer, out_dataptr, static_cast(srcwidth) * static_cast(srcheight) * 3 * sizeof(uint8_t)); - // Assign appropriate parts of buffer to image planes in pFrameRGB Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture - av_image_fill_arrays(__frameRGB->data, __frameRGB->linesize, RGBbuffer, AVPixelFormat::AV_PIX_FMT_RGB24, srcwidth, srcheight, 1); - sws_scale(PswsCtx, __frameRGB->data, __frameRGB->linesize, 0, srcheight, frame->data, frame->linesize); - //cout << "Free 1" << endl; - //delete frameRGB; - //cout << "Free 2" << endl; - return true; -} - -/* -* encode one video frame and send it to the muxer -* return 1 when encoding is finished, 0 otherwise -*/ -int cmpc::CMpegServer::__avcodec_encode_video2(AVCodecContext* enc_ctx, AVPacket* pkt, AVFrame* frame) { - int ret; - int wfret = 0; - - if (frame) { - if (__dumpControl > 1) { - std::ostringstream str_data; - str_data << "Send frame " << frame->pts << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_DEBUG, "%s", str_data_s.c_str()); - } - } - else { - return AVERROR(EAGAIN); - } - - ret = avcodec_send_frame(enc_ctx, frame); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - return ret == AVERROR_EOF ? 
0 : ret; - } - - ret = avcodec_receive_packet(enc_ctx, pkt); - if (ret == AVERROR(EAGAIN)) - return 0; - - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "Write packet " << pkt->pts << " (size=" << pkt->size << "), "; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - - if (!ret) { - wfret = __write_frame(); - av_packet_unref(Ppacket); - if (wfret < 0) { - cerr << "Error while writing video frame: " << av_err2str(ret) << endl; - return wfret; - } - } - return ret; -} - -int cmpc::CMpegServer::__avcodec_encode_video2_flush(AVCodecContext* enc_ctx, AVPacket* pkt) { - int ret; - int wfret = 0; - if (__dumpControl > 1) { - std::ostringstream str_data; - str_data << "Flush all packets" << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_DEBUG, "%s", str_data_s.c_str()); - } - - ret = avcodec_send_frame(enc_ctx, nullptr); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0) { - return ret == AVERROR_EOF ? 
0 : ret; - } - - while (ret >= 0) { - ret = avcodec_receive_packet(enc_ctx, pkt); - if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) { - return 0; - } - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "Write packet " << pkt->pts << " (size=" << pkt->size << "), "; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - if (!ret) { - wfret = __write_frame(); - av_packet_unref(pkt); - } - else { - wfret = 0; - } - if (wfret < 0) { - cerr << "Error while writing video frame: " << av_err2str(ret) << endl; - return wfret; - } - } - return ret; -} - -int cmpc::CMpegServer::ServeFrameBlock(PyArrayObject* PyFrame) { - if (__start_time > 0) { - auto cur_time = static_cast(av_gettime() - __start_time); - if (cur_time < __cur_time) { - av_usleep(static_cast((__cur_time - cur_time) / 2)); - } - ServeFrame(PyFrame); - return 0; - } - else { - return -1; - } -} - -int cmpc::CMpegServer::ServeFrame(PyArrayObject* PyFrame) { - int ret; - auto c = PStreamContex.enc; - AVFrame* frame = nullptr; - - if ((!__have_video) || (!__enable_header)) - cerr << "Not allowed to use this method before FFmpegSetup()" << endl; - if (PyFrame) { - frame = __get_video_frame(PyFrame); - ret = __avcodec_encode_video2(c, Ppacket, frame); - } - else { - frame = nullptr; - ret = __avcodec_encode_video2_flush(c, Ppacket); - } - - if (ret < 0) { - cerr << "Error encoding video frame: " << av_err2str(ret) << endl; - return ret; - } - return frame ? 
0 : 1; -} - -void cmpc::CMpegServer::setParameter(string keyword, void* ptr) { - if (keyword.compare("decoder") == 0) { - CMpegDecoder* ref = reinterpret_cast(ptr); - resetPath(ref->videoPath); - codecName.assign(ref->_str_codec); - if (ref->PCodecCtx) { - bitRate = ref->PCodecCtx->bit_rate; - GOPSize = ref->PCodecCtx->gop_size; - MaxBFrame = ref->PCodecCtx->max_b_frames; - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->PCodecCtx->thread_count; - } - nthread = ref->PCodecCtx->thread_count; - } - else { - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->nthread; - } - nthread = ref->nthread; - } - if (ref->widthDst > 0 && ref->heightDst > 0) { - width = ref->widthDst; - height = ref->heightDst; - } - else { - width = ref->width; - height = ref->height; - } - widthSrc = width; - heightSrc = height; - if (ref->PVideoStream) { - //timeBase = ref->PVideoStream->time_base; - frameRate = ref->PVideoStream->avg_frame_rate; - timeBase = _setAVRational(frameRate.den, frameRate.num); - } - if (GOPSize > 0) { - auto frame_ahead = 2 * GOPSize; - __pts_ahead = __FrameToPts(static_cast(frame_ahead)); - } - } - else if (keyword.compare("client") == 0) { - CMpegClient* ref = reinterpret_cast(ptr); - resetPath(ref->videoPath); - codecName.assign(ref->_str_codec); - if (ref->PCodecCtx) { - bitRate = ref->PCodecCtx->bit_rate; - GOPSize = ref->PCodecCtx->gop_size; - MaxBFrame = ref->PCodecCtx->max_b_frames; - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->PCodecCtx->thread_count; - } - nthread = ref->PCodecCtx->thread_count; - } - else { - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = ref->nthread; - } - nthread = ref->nthread; - } - if (ref->widthDst > 0 && ref->heightDst > 0) { - width = ref->widthDst; - height = ref->heightDst; - } - else { - width = ref->width; - height = ref->height; - } - widthSrc = width; - heightSrc = height; - if (ref->PVideoStream) { - //timeBase = ref->PVideoStream->time_base; - frameRate 
= ref->PVideoStream->avg_frame_rate; - timeBase = _setAVRational(frameRate.den, frameRate.num); - } - if (GOPSize > 0) { - auto frame_ahead = 2 * GOPSize; - __pts_ahead = __FrameToPts(static_cast(frame_ahead)); - } - } - else if (keyword.compare("configDict") == 0) { - PyObject* ref = reinterpret_cast(ptr); - if (PyDict_Check(ref)) { - string key; - PyObject* val; - // Set parameters. - key.assign("videoPath"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - resetPath(val_str); - } - } - else { - key.assign("videoAddress"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - resetPath(val_str); - } - } - } - key.assign("codecName"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyBytes_Check(val)) { - auto val_str = string(PyBytes_AsString(val)); - codecName.assign(val_str); - } - } - key.assign("bitRate"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLongLong(val)); - bitRate = val_num; - } - } - key.assign("GOPSize"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - GOPSize = val_num; - } - } - key.assign("maxBframe"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - MaxBFrame = val_num; - } - } - key.assign("width"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - width = val_num; - widthSrc = val_num; - } - } - key.assign("height"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - height = val_num; - heightSrc = val_num; - } - } 
- key.assign("widthSrc"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_1 = static_cast(PyLong_AsLong(val)); - key.assign("heightSrc"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_2 = static_cast(PyLong_AsLong(val)); - widthSrc = val_num_1; - heightSrc = val_num_2; - } - } - } - } - key.assign("widthDst"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_1 = static_cast(PyLong_AsLong(val)); - key.assign("heightDst"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num_2 = static_cast(PyLong_AsLong(val)); - width = val_num_1; - height = val_num_2; - } - } - } - } - key.assign("frameRate"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyTuple_Check(val)) { - auto valObj = PyTuple_GetItem(val, 0); - int num = static_cast(PyLong_AsLong(valObj)); - valObj = PyTuple_GetItem(val, 1); - int den = static_cast(PyLong_AsLong(valObj)); - frameRate = _setAVRational(num, den); - timeBase = _setAVRational(den, num); - if (GOPSize > 0) { - auto frame_ahead = 2 * GOPSize; - __pts_ahead = __FrameToPts(static_cast(frame_ahead)); - } - } - } - key.assign("nthread"); - val = PyDict_GetItemString(ref, key.c_str()); - if (val) { - if (PyLong_Check(val)) { - auto val_num = static_cast(PyLong_AsLong(val)); - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = val_num; - } - nthread = val_num; - } - } - } - } - else if (keyword.compare("videoAddress") == 0) { - string* ref = reinterpret_cast(ptr); - resetPath(*ref); - } - else if (keyword.compare("codecName") == 0) { - string* ref = reinterpret_cast(ptr); - codecName.assign(*ref); - } - else if (keyword.compare("bitRate") == 0) { - double* ref = reinterpret_cast(ptr); - auto bit_rate = static_cast((*ref) * 1024); - bitRate = bit_rate; - } - else if (keyword.compare("width") == 0) { - int* ref 
= reinterpret_cast(ptr); - width = *ref; - } - else if (keyword.compare("height") == 0) { - int* ref = reinterpret_cast(ptr); - height = *ref; - } - else if (keyword.compare("widthSrc") == 0) { - int* ref = reinterpret_cast(ptr); - widthSrc = *ref; - } - else if (keyword.compare("heightSrc") == 0) { - int* ref = reinterpret_cast(ptr); - heightSrc = *ref; - } - else if (keyword.compare("GOPSize") == 0) { - int* ref = reinterpret_cast(ptr); - GOPSize = *ref; - } - else if (keyword.compare("frameAhead") == 0) { - int* ref = reinterpret_cast(ptr); - auto frame_ahead = *ref; - __pts_ahead = __FrameToPts(static_cast(frame_ahead)); - } - else if (keyword.compare("maxBframe") == 0) { - int* ref = reinterpret_cast(ptr); - MaxBFrame = *ref; - } - else if (keyword.compare("frameRate") == 0) { - PyObject* ref = reinterpret_cast(ptr); - auto refObj = PyTuple_GetItem(ref, 0); - int num = static_cast(PyLong_AsLong(refObj)); - refObj = PyTuple_GetItem(ref, 1); - int den = static_cast(PyLong_AsLong(refObj)); - frameRate = _setAVRational(num, den); - timeBase = _setAVRational(den, num); - if (GOPSize > 0) { - auto frame_ahead = 2 * GOPSize; - __pts_ahead = __FrameToPts(static_cast(frame_ahead)); - } - } - else if (keyword.compare("nthread") == 0) { - auto ref = reinterpret_cast(ptr); - if (PStreamContex.enc) { - PStreamContex.enc->thread_count = *ref; - } - nthread = *ref; - } -} - -PyObject* cmpc::CMpegServer::getParameter(string keyword) { - if (keyword.compare("videoAddress") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(videoPath.c_str(), static_cast(videoPath.size())); - } - else if (keyword.compare("codecName") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(codecName.c_str(), static_cast(codecName.size())); - } - else if (keyword.compare("formatName") == 0) { - return PyUnicode_DecodeFSDefaultAndSize(__formatName.c_str(), static_cast(__formatName.size())); - } - else if (keyword.compare("bitRate") == 0) { - auto bit_rate = static_cast(bitRate) / 1024; - return 
Py_BuildValue("d", bit_rate); - } - else if (keyword.compare("width") == 0) { - return Py_BuildValue("i", width); - } - else if (keyword.compare("height") == 0) { - return Py_BuildValue("i", height); - } - else if (keyword.compare("widthSrc") == 0) { - return Py_BuildValue("i", widthSrc); - } - else if (keyword.compare("heightSrc") == 0) { - return Py_BuildValue("i", heightSrc); - } - else if (keyword.compare("GOPSize") == 0) { - return Py_BuildValue("i", GOPSize); - } - else if (keyword.compare("maxBframe") == 0) { - return Py_BuildValue("i", MaxBFrame); - } - else if (keyword.compare("ptsAhead") == 0) { - return Py_BuildValue("L", __pts_ahead); - } - else if (keyword.compare("waitRef") == 0) { - int64_t cur_time = 0; - if (__start_time > 0) { - cur_time = av_gettime() - __start_time; - if (cur_time < __cur_time) { - return Py_BuildValue("d", static_cast(__cur_time - cur_time) * av_q2d(time_base_q) / 2); - } - else { - return Py_BuildValue("d", 0.0); - } - } - else { - return Py_BuildValue("d", 0.0); - } - } - else if (keyword.compare("frameRate") == 0) { - auto frame_base = frameRate; - auto frame_rate = static_cast(frame_base.num) / static_cast(frame_base.den); - return Py_BuildValue("d", frame_rate); - } - else if (keyword.compare("nthread") == 0) { - if (PStreamContex.enc) { - return Py_BuildValue("i", PStreamContex.enc->thread_count); - } - else { - return Py_BuildValue("i", nthread); - } - } - else { - Py_RETURN_NONE; - } -} - -PyObject* cmpc::CMpegServer::getParameter() { - auto res = PyDict_New(); - string key; - PyObject* val = nullptr; - // Fill the values. 
- key.assign("videoAddress"); - val = Py_BuildValue("y", videoPath.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("codecName"); - val = Py_BuildValue("y", codecName.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("formatName"); - val = Py_BuildValue("y", __formatName.c_str()); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("bitRate"); - val = Py_BuildValue("L", bitRate); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("GOPSize"); - val = Py_BuildValue("i", GOPSize); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("maxBframe"); - val = Py_BuildValue("i", MaxBFrame); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("ptsAhead"); - val = Py_BuildValue("L", __pts_ahead); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (widthSrc > 0) { - key.assign("widthSrc"); - val = Py_BuildValue("i", widthSrc); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - if (heightSrc > 0) { - key.assign("heightSrc"); - val = Py_BuildValue("i", heightSrc); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - key.assign("width"); - val = Py_BuildValue("i", width); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("height"); - val = Py_BuildValue("i", height); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - key.assign("frameRate"); - val = Py_BuildValue("(ii)", frameRate.num, frameRate.den); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - if (PStreamContex.enc) { - key.assign("nthread"); - val = Py_BuildValue("i", PStreamContex.enc->thread_count); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - else { - key.assign("nthread"); - val = Py_BuildValue("i", nthread); - PyDict_SetItemString(res, key.c_str(), val); - Py_DECREF(val); - } - return res; -} - 
-bool cmpc::CMpegServer::FFmpegSetup() { - if (!__setup_check()) { - cerr << "Have not get necessary and correct configurations, so FFmpegSetup() should not be called." << endl; - return false; - } - const AVCodec* video_codec; - int ret; - - if (Ppacket) - av_packet_free(&Ppacket); - Ppacket = av_packet_alloc(); - if (!Ppacket) - return false; - - AVDictionary* opt = nullptr; - //av_dict_set(&opt, "vcodec", codecName.c_str(), 0); - //av_dict_set(&opt, "fflags", "", 0); - - /* allocate the output media context */ - //auto getFormat = av_guess_format(codecName.c_str(), nullptr, nullptr); - string format_name; - if (__formatName.compare("rtsp") == 0) { - format_name.assign("rtsp"); - } - else if (__formatName.compare("rtmp") == 0) { - format_name.assign("flv"); - } - else if (__formatName.compare("http") == 0) { - format_name.assign("flv"); - } - else if (__formatName.compare("ftp") == 0) { - format_name.assign("flv"); - } - else if (__formatName.compare("sftp") == 0) { - format_name.assign("flv"); - } - else { - cout << "The format name " << __formatName << " is not supported. Now we only support \"rtsp\", \"rtmp\", \"http\"." << endl; - return false; - } - avformat_alloc_output_context2(&PFormatCtx, nullptr, format_name.c_str(), videoPath.c_str()); - PFormatCtx->avoid_negative_ts = AVFMT_AVOID_NEG_TS_AUTO; - if (!PFormatCtx) { - cout << "Could not select the encoder. The allocation is failed." << endl; - return false; - } - - auto fmt = PFormatCtx->oformat; - - /* Add the audio and video streams using the default format codecs - * and initialize the codecs. */ - if (fmt->video_codec != AVCodecID::AV_CODEC_ID_NONE) { - video_codec = __add_stream(); - if (!video_codec) { - FFmpegClose(); - return false; - } - else - __have_video = true; - } - else { - video_codec = nullptr; - } - - /* Now that all the parameters are set, we can open the audio and - * video codecs and allocate the necessary encode buffers. 
*/ - if (__have_video) { - if (!__open_video(video_codec, opt)) { - FFmpegClose(); - return false; - } - else - __have_video = true; - } - - if (__dumpControl > 1) { - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 1); - } - - /* open the output file, if needed */ - if (!(fmt->flags & AVFMT_NOFILE)) { - AVDictionary* opt_io = nullptr; - /*if (__formatName.compare("http") == 0) { - ret = av_dict_set(&opt_io, "listen", "1", 0); - if (ret < 0) { - cerr << "Could not set the options for the file: " << av_err2str(ret) << endl; - FFmpegClose(); - return false; - } - }*/ - ret = avio_open2(&PFormatCtx->pb, videoPath.c_str(), AVIO_FLAG_WRITE, nullptr, &opt_io); - if (ret < 0) { - cerr << "Could not open '" << videoPath << "': " << av_err2str(ret) << endl; - FFmpegClose(); - return false; - } - if (opt_io) { - av_dict_free(&opt_io); - } - } - - if (!(__frameRGB = av_frame_alloc())) { - cerr << "Could Allocate Temp Frame" << endl; - FFmpegClose(); - return false; - } - - /* Write the stream header, if any. */ - ret = avformat_write_header(PFormatCtx, &opt); - if (ret < 0) { - cerr << "Error occurred when opening output file: " << av_err2str(ret) << endl; - FFmpegClose(); - return false; - } - else { - __enable_header = true; - } - - // Register the start time. - __start_time = av_gettime(); - return true; -} - -void cmpc::CMpegServer::FFmpegClose() { - if (__enable_header && __have_video) { - //cout << "Flush Video" << endl; - int x; - if ((x = ServeFrame(nullptr)) == 0) { - // cout << "Ret: " << x << endl; - } - if (__dumpControl > 0) { - std::ostringstream str_data; - str_data << "All frames are flushed from cache, the video would be closed." << endl; - auto str_data_s = str_data.str(); - av_log(nullptr, AV_LOG_INFO, "%s", str_data_s.c_str()); - } - } - __start_time = 0; - __cur_time = 0; - if (PFormatCtx) { - if (__enable_header) { - av_write_trailer(PFormatCtx); - __enable_header = false; - } - /* Close each codec. 
*/ - if (__have_video) { - /* free the stream */ - //avformat_free_context(PFormatCtx); - if (PStreamContex.enc) - avcodec_free_context(&PStreamContex.enc); - if (PStreamContex.frame) - av_frame_free(&PStreamContex.frame); - if (PStreamContex.tmp_frame) - av_frame_free(&PStreamContex.tmp_frame); - if (PStreamContex.sws_ctx) { - sws_freeContext(PStreamContex.sws_ctx); - PStreamContex.sws_ctx = nullptr; - } - if (PswsCtx) { - sws_freeContext(PswsCtx); - PswsCtx = nullptr; - } - if (RGBbuffer) { - av_free(RGBbuffer); - RGBbuffer = nullptr; - } - __have_video = false; - } - auto fmt = PFormatCtx->oformat; - if (!(fmt->flags & AVFMT_NOFILE)) - /* Close the output file. */ - avio_closep(&PFormatCtx->pb); - /* free the stream */ - avformat_free_context(PFormatCtx); - PFormatCtx = nullptr; - } - if (Ppacket) { - av_packet_free(&Ppacket); - Ppacket = nullptr; - } - if (__frameRGB) { - av_frame_free(&__frameRGB); - } -} - -void cmpc::CMpegServer::dumpFormat() { - if (PFormatCtx) - av_dump_format(PFormatCtx, 0, videoPath.c_str(), 1); - else - cerr << "Not loaded video format context now. dumpFormat() is not avaliable." << endl; -} - -ostream& cmpc::operator<<(ostream& out, cmpc::CMpegServer& self_class) { - out << std::setw(1) << "/"; - out << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setw(1) << " * Packed FFmpeg Server - Y. 
Jin V" << MPEGCODER_CURRENT_VERSION << endl; - out << " " << std::setfill('*') << std::setw(44) << "" << std::setfill(' ') << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * VideoAddress: " \ - << self_class.videoPath << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (Width, Height): " \ - << self_class.width << ", " << self_class.height << endl; - if (self_class.widthSrc > 0 && self_class.heightSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * (WidthSrc, HeightSrc): " \ - << self_class.widthSrc << ", " << self_class.heightSrc << endl; - } - else if (self_class.widthSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * WidthSrc: " \ - << self_class.widthSrc << endl; - } - else if (self_class.heightSrc > 0) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * HeightSrc: " \ - << self_class.heightSrc << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Enccoder: " \ - << self_class.codecName << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Stream format: " \ - << self_class.__formatName << endl; - if (self_class.PStreamContex.enc) { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number: " \ - << self_class.PStreamContex.enc->thread_count << endl; - } - else { - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Thread number (P): " \ - << self_class.nthread << endl; - } - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Bit Rate: " \ - << (self_class.bitRate >> 10) << " [Kbit/s]" << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Frame Rate: " \ - << static_cast(self_class.frameRate.num) / static_cast(self_class.frameRate.den) << " [FPS]" << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Ahead PTS: " \ - << self_class.__pts_ahead << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * 
GOP Size: " \ - << self_class.GOPSize << endl; - out << std::setiosflags(std::ios::left) << std::setw(25) << " * Maxmal Bframe Density: " \ - << self_class.MaxBFrame << " [/GOP]" << endl; - out << std::setw(1) << " */"; - return out; -} diff --git a/MpegCoder/MpegStreamer.h b/MpegCoder/MpegStreamer.h deleted file mode 100644 index f809171..0000000 --- a/MpegCoder/MpegStreamer.h +++ /dev/null @@ -1,178 +0,0 @@ -// 下列 ifdef 块是创建使从 DLL 导出更简单的 -// 宏的标准方法。此 DLL 中的所有文件都是用命令行上定义的 MPEGCODER_EXPORT -// 符号编译的。在使用此 DLL 的 -// 任何其他项目上不应定义此符号。这样,源文件中包含此文件的任何其他项目都会将 -// MPEGCODER_API 函数视为自 DLL 导入,而此 DLL 则将用此宏定义的 -// 符号视为是被导出的。 -#ifndef MPEGSTREAMER_H_INCLUDED -#define MPEGSTREAMER_H_INCLUDED - -#include "MpegBase.h" - -// Exported from MpegCoder.dll -namespace cmpc { - - extern int8_t __dumpControl; - class CMpegDecoder; - class CMpegEncoder; - - class BufferList { // A buffer holder of several frames - public: - BufferList(void); - ~BufferList(void); - BufferList(const BufferList& ref); - BufferList& operator=(const BufferList& ref); - BufferList(BufferList&& ref) noexcept; - BufferList& operator=(BufferList&& ref) noexcept; - void clear(void); - const int64_t size() const; - void set(int64_t set_size, int width, int height, int widthDst = 0, int heightDst = 0); - void set_timer(AVRational targetFrameRate, AVRational timeBase); - bool reset_memory(); - void freeze_write(int64_t read_size); - bool write(SwsContext* PswsCtx, AVFrame* frame); - PyObject* read(); - private: - int64_t _Buffer_pos; // Writring cursor of the source buffer,pointing to the index of the currently written frame. - int64_t _Buffer_rpos; // Reading cursor of the source buffer,pointing to the index of the currently read frame. - int64_t _Buffer_size; // Size of the source buffer, it should be determined by the numeber of required frames. - int64_t __Read_size; // A temporary variable used for showing the size of the data to be read. 
- int64_t next_pts; - int64_t interval_pts; - int dst_width, dst_height; - int src_width, src_height; - int _Buffer_capacity; - AVFrame* frameRGB; - uint8_t** _Buffer_List; // Source buffer, the size of this buffer is determined by the number of required frames. - }; - - class CMpegClient { - public: - CMpegClient(void); // Constructor. - ~CMpegClient(void); // 3-5 law. Destructor. - CMpegClient(const CMpegClient& ref) = delete; // Delete the copy constructor. - CMpegClient& operator=(const CMpegClient& ref) = delete; // Delete the copy assignment operator. - CMpegClient(CMpegClient&& ref) noexcept; // Move constructor. - CMpegClient& operator=(CMpegClient&& ref) noexcept; // Move assignment operator. - friend class CMpegEncoder; // Let the encoder be able to access the member of this class. - friend class CMpegServer; // Let the server be able to access the member of this class. - friend ostream& operator<<(ostream& out, CMpegClient& self_class); // Show the results. - void clear(void); // Clear all configurations and resources. - void meta_protected_clear(void); // Clear the resources, but the configurations are remained. - void dumpFormat(); // Show the av_format results. - void setParameter(string keyword, void* ptr); // Set arguments. - PyObject* getParameter(string keyword); // Get the current arguments. - PyObject* getParameter(); // Get all key arguments. - void resetPath(string inVideoPath); // Reset the path (URL) of the online video stream. - bool FFmpegSetup(); // Configure the decoder, and extract the basic meta-data. This method is also equipped in the constructor. - bool FFmpegSetup(string inVideoPath); // Configure the decoder with extra arguments. - bool start(); // Start the listening to the online stream. - void terminate(); // Terminate the listener. - PyObject* ExtractFrame(int64_t readsize); // Extract frames with the given number. - PyObject* ExtractFrame(); // Extract frames. The number is configured in the class properties. 
- private: - string videoPath; // The path (URL) of the online video stream. - int width, height; // Width, height of the video. - int widthDst, heightDst; // Target width, height of ExtractFrame(). - enum AVPixelFormat PPixelFormat; // Enum object of the pixel format. - AVFormatContext* PFormatCtx; // Format context of the video. - AVCodecContext* PCodecCtx; // Codec context of the video. - AVStream* PVideoStream; // Video stream. - - AVFrame* frame; - - int PVideoStreamIDX; // The index of the video stream. - int PVideoFrameCount; // The counter of the decoded frames. - BufferList buffer; // The buffer of the RGB formatted images. - struct SwsContext* PswsCtx; // The context of the scale transformator. - int64_t cache_size, read_size; - AVRational frameRate; - - std::thread read_handle; // The thread of the circular frame reader. - std::mutex read_check; // Lock for reading the status. - std::mutex info_lock; // Lock for reading the info. - bool reading; - - string _str_codec; // The name of the current codec. - double _duration; // The duration of the current video. - int64_t _predictFrameNum; // The prediction of the total number of frames. - int nthread; // The number of threads; - - /* Enable or disable frame reference counting. You are not supposed to support - * both paths in your application but pick the one most appropriate to your - * needs. Look for the use of refcount in this example to see what are the - * differences of API usage between them. */ - int refcount; // Reference count of the video frame. 
- bool __setup_check() const; - int _open_codec_context(int& stream_idx, AVCodecContext*& dec_ctx, AVFormatContext* PFormatCtx, enum AVMediaType type); - void __client_holder(); - AVRational _setAVRational(int num, int den); - int __save_frame(AVFrame*& frame, AVPacket*& pkt, bool& got_frame, int cached); - int __avcodec_decode_video2(AVCodecContext* avctx, AVFrame* frame, bool& got_frame, AVPacket* pkt); - }; - - class CMpegServer { - public: - CMpegServer(void); // Constructor. - ~CMpegServer(void); // 3-5 law. Destructor. - CMpegServer(const CMpegServer& ref); // Delete the copy constructor. - CMpegServer& operator=(const CMpegServer& ref); // Delete the copy assignment operator. - CMpegServer(CMpegServer&& ref) noexcept; // Move constructor. - CMpegServer& operator=(CMpegServer&& ref) noexcept; // Move assignment operator. - //friend class CMpegEncoder; // Let the server be able to access the member of this class. - friend ostream& operator<<(ostream& out, CMpegServer& self_class); // Show the results. - void clear(void); // Clear all configurations and resources. - void meta_protected_clear(void); // Clear the resources, but the configurations are remained. - void resetPath(string inVideoPath); // Reset the path of the output video stream. - void dumpFormat(); // Show the av_format results. - bool FFmpegSetup(); // Configure the encoder, and create the file handle. This method is also equipped in the constructor. - bool FFmpegSetup(string inVideoPath); // Configure the encoder with extra arguments. - void FFmpegClose(); // Close the encoder, and finalize the written of the encoded video. - void setParameter(string keyword, void* ptr); // Set arguments. - PyObject* getParameter(string keyword); // Get the current arguments. - PyObject* getParameter(); // Get all key arguments. - int ServeFrameBlock(PyArrayObject* PyFrame); // Encode the frame into the output stream (block mode). 
- int ServeFrame(PyArrayObject* PyFrame); // Encode the frame into the output stream. - private: - string videoPath; // The path of the output video stream. - string __formatName; // The format name of the stream. Could be "rtsp" or "rtmp". This value is detected from the videoPath. - string codecName; // The name of the codec - int64_t bitRate; // The bit rate of the output video. - int64_t __pts_ahead; // The ahead pts. - int64_t __start_time; // The start time stamp. This value is used for controlling the writing of the frames. - int64_t __cur_time; // The current time stamp. This value is restricted by __pts_ahead. - int width, height; // The size of the frames in the output video. - int widthSrc, heightSrc; // The size of the input data (frames). - AVRational timeBase, frameRate; // The time base and the frame rate. - AVRational time_base_q; // The time base used for calculating the absolute time. - int GOPSize, MaxBFrame; // The size of GOPs, and the maximal number of B frames. - OutputStream PStreamContex; // The context of the current video parser. - AVFormatContext* PFormatCtx; // Format context of the video. - AVPacket* Ppacket; // AV Packet used for writing frames. - struct SwsContext* PswsCtx; // The context of the scale transformator. - AVFrame* __frameRGB; // A temp AV frame object. Used for converting the data format. - uint8_t* RGBbuffer; // Data buffer. 
- bool __have_video, __enable_header; - - int nthread; // The number of threads; - - AVRational _setAVRational(int num, int den); - int64_t __FrameToPts(int64_t seekFrame) const; - int64_t __TimeToPts(double seekTime) const; - bool __setup_check() const; - bool _LoadFrame_castFromPyFrameArray(AVFrame* frame, PyArrayObject* PyFrame); - void __log_packet(); - int __write_frame(); - const AVCodec* __add_stream(); - AVFrame* __alloc_picture(enum AVPixelFormat pix_fmt, int width, int height); - bool __open_video(const AVCodec* codec, const AVDictionary* opt_arg); - AVFrame* __get_video_frame(PyArrayObject* PyFrame); - int __avcodec_encode_video2(AVCodecContext* enc_ctx, AVPacket* pkt, AVFrame* frame); - int __avcodec_encode_video2_flush(AVCodecContext* enc_ctx, AVPacket* pkt); - }; - - ostream& operator<<(ostream& out, CMpegClient& self_class); - ostream& operator<<(ostream& out, CMpegServer& self_class); -} - -#endif diff --git a/MpegCoder/dllmain.cpp b/MpegCoder/dllmain.cpp deleted file mode 100644 index 0f676a8..0000000 --- a/MpegCoder/dllmain.cpp +++ /dev/null @@ -1,67 +0,0 @@ -// dllmain.cpp : The entry of the dll program. -#include "stdafx.h" -#include "MpegPyd.h" - -/***************************************************************************** -* The initialization of the module. Would be invoked when using import. -*****************************************************************************/ -PyMODINIT_FUNC // == __decslpec(dllexport) PyObject*, Define the exported main function. -PyInit_mpegCoder(void) { // The external module name is: --CppClass - import_array(); - /* Initialize libavcodec, and register all codecs and formats. */ - // Register everything - #ifndef FFMPG3_4 - av_register_all(); - #endif - #ifndef FFMPG4_0 - avformat_network_init(); - #endif - - PyObject* pReturn = 0; - // Configure the __new__ method as the default method. This method is used for building the instances. 
- C_MPDC_ClassInfo.tp_new = PyType_GenericNew; - C_MPEC_ClassInfo.tp_new = PyType_GenericNew; - C_MPCT_ClassInfo.tp_new = PyType_GenericNew; - C_MPSV_ClassInfo.tp_new = PyType_GenericNew; - - /* Finish the initialization, including the derivations. - * When success, return 0; Otherwise, return -1 and throw errors. */ - if (PyType_Ready(&C_MPDC_ClassInfo) < 0) - return nullptr; - if (PyType_Ready(&C_MPEC_ClassInfo) < 0) - return nullptr; - if (PyType_Ready(&C_MPCT_ClassInfo) < 0) - return nullptr; - if (PyType_Ready(&C_MPSV_ClassInfo) < 0) - return nullptr; - - pReturn = PyModule_Create(&ModuleInfo); // Create the module according to the module info. - if (pReturn == 0) - return nullptr; - - Py_INCREF(&ModuleInfo); // Because the module is not registered to the python counter, Py_INCREF is required to be invoked. - PyModule_AddFunctions(pReturn, C_MPC_MethodMembers); // Add the global method members. - PyModule_AddObject(pReturn, "MpegDecoder", (PyObject*)&C_MPDC_ClassInfo); // Add the class as one module member. - PyModule_AddObject(pReturn, "MpegEncoder", (PyObject*)&C_MPEC_ClassInfo); - PyModule_AddObject(pReturn, "MpegClient", (PyObject*)&C_MPCT_ClassInfo); - PyModule_AddObject(pReturn, "MpegServer", (PyObject*)&C_MPSV_ClassInfo); - return pReturn; -} - -/* -BOOL APIENTRY DllMain( HMODULE hModule, - DWORD ul_reason_for_call, - LPVOID lpReserved - ) -{ - switch (ul_reason_for_call) - { - case DLL_PROCESS_ATTACH: - case DLL_THREAD_ATTACH: - case DLL_THREAD_DETACH: - case DLL_PROCESS_DETACH: - break; - } - return TRUE; -} -*/ diff --git a/MpegCoder/snprintf.cpp b/MpegCoder/snprintf.cpp deleted file mode 100644 index 8142cf8..0000000 --- a/MpegCoder/snprintf.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * C99-compatible snprintf() and vsnprintf() implementations - * Copyright (c) 2012 Ronald S. Bultje - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - - -#include "stdafx.h" - -extern "C" -{ - #include - #include - #include - #include -} - -#include "compat/va_copy.h" -#include "libavutil/error.h" -#include "compat/msvcrt/snprintf.h" - -#if defined(__MINGW32__) -#define EOVERFLOW EFBIG -#endif - -extern "C" -{ - int avpriv_snprintf(char *s, size_t n, const char *fmt, ...) { - va_list ap; - int ret; - - va_start(ap, fmt); - ret = avpriv_vsnprintf(s, n, fmt, ap); - va_end(ap); - - return ret; - } - - int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap) { - int ret; - va_list ap_copy; - - if (n == 0) - return _vscprintf(fmt, ap); - else if (n > INT_MAX) - return AVERROR(EOVERFLOW); - - /* we use n - 1 here because if the buffer is not big enough, the MS - * runtime libraries don't add a terminating zero at the end. MSDN - * recommends to provide _snprintf/_vsnprintf() a buffer size that - * is one less than the actual buffer, and zero it before calling - * _snprintf/_vsnprintf() to workaround this problem. 
- * See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */ - memset(s, 0, n); - va_copy(ap_copy, ap); - ret = _vsnprintf_s(s, n - 1, INT_MAX, fmt, ap_copy); - va_end(ap_copy); - if (ret == -1) - ret = _vscprintf(fmt, ap); - - return ret; - } -} \ No newline at end of file diff --git a/MpegCoder/stdafx.cpp b/MpegCoder/stdafx.cpp deleted file mode 100644 index fa90b1c..0000000 --- a/MpegCoder/stdafx.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// stdafx.cpp : 只包括标准包含文件的源文件 -// $safeprojectname$.pch 将作为预编译标头 -// stdafx.obj 将包含预编译类型信息 - -#include "stdafx.h" - -// TODO: 在 STDAFX.H 中引用任何所需的附加头文件, -//而不是在此文件中引用 diff --git a/MpegCoder/stdafx.h b/MpegCoder/stdafx.h deleted file mode 100644 index 4b4e4a8..0000000 --- a/MpegCoder/stdafx.h +++ /dev/null @@ -1,19 +0,0 @@ -// stdafx.h : 标准系统包含文件的包含文件, -// 或是经常使用但不常更改的 -// 特定于项目的包含文件 -// - -#pragma once - -#include "targetver.h" - -#define WIN32_LEAN_AND_MEAN // 从 Windows 头中排除极少使用的资料 -// Numpy header: -#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION -// Windows header: -#define _CRT_SECURE_NO_WARNINGS -#include - - - -// TODO: 在此处引用程序需要的其他头文件 diff --git a/MpegCoder/targetver.h b/MpegCoder/targetver.h deleted file mode 100644 index 91042b9..0000000 --- a/MpegCoder/targetver.h +++ /dev/null @@ -1,8 +0,0 @@ -#pragma once - -// 包括 SDKDDKVer.h 将定义可用的最高版本的 Windows 平台。 - -// 如果要为以前的 Windows 平台生成应用程序,请包括 WinSDKVer.h,并将 -// 将 _WIN32_WINNT 宏设置为要支持的平台,然后再包括 SDKDDKVer.h。 - -#include diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 7d4b1a0..c46a8fc --- a/README.md +++ b/README.md @@ -8,67 +8,32 @@ import mpegCoder | Branch | Description | | :-------------: | :-----------: | -| `master` :link: | The source project of `mpegCoder`, Windows version. | +| [`master` :link:][git-master] | The source project of `mpegCoder`, Windows version. | | [`master-linux` :link:][git-linux] | The source project of `mpegCoder`, Linux version. 
| | [`example-client-check` :link:][exp1] | A testing project of the online video stream demuxing. | | [`example-client-player` :link:][exp2] | A testing project of the simple online video stream player. | -## Source project of `mpegCoder` (Windows) +## Scripts for building the PyPI package -The following instructions are used for building the project on Windows with Visual Studio 2019. +The scripts in this branch are used for building the PyPI package, which will be uploaded to: -1. Clone the `master` branch which only contains the codes of `mpegCoder`: +[`mpegCoder` on PyPI](https://pypi.org/project/mpegCoder) - ```bash - git clone --single-branch -b master https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python.git - ``` +The script will keep updated with the newest releases of the pre-compiled modules. The following table show releases that have been uploaded: -2. Download the FFMpeg dependencies, including `include` and `lib`. Users could download dependencies manually by checking [the release page :link:](https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/releases/tag/deps-3.0.0). However, we recommend users to use the following script to get the dependencies quickly: +| `mpegCoder` | Uploaded | +| :-----------: | :--------: | +| `3.2.4` | :heavy_check_mark: | +| `3.2.3` | :heavy_check_mark: | +| `3.2.2` | :heavy_check_mark: | +| `3.2.1` | :heavy_check_mark: | +| `3.2.0` | :heavy_check_mark: | +| `3.1.0` | :heavy_check_mark: | - ```bash - python webtools.py - ``` - - This script requires users to install `urllib3`. The `tqdm` is also recommended to be installed. - -3. The following configurations should be set for `All` (both debug and release) and `x64`. Open the project by `MpegCoder.sln`. Then configure the following paths of the include directories and the library directories. 
In both configurations, the first item is required to be modified according to your python path, the second item is required to be modified according to your numpy path. - - | Path | Screenshot | - | :----- | :----------: | - | `includes` | ![Configure includes](./display/config-include.png) | - | `libs` | ![Configure libs](./display/config-include.png) | - -4. Modify the linker configs. We only need to change the item `python3x.lib` according to the python version you have. - ![Configure linker](./display/config-linker.png) - -5. Run the `Release`, `x64` build. The built file should be saved as `x64\Release\mpegCoder.pyd`. - -6. The `mpegCoder.pyd` should be used together with the FFMpeg shared libraries, including: - - ```shell - avcodec-59.dll - avformat-59.dll - avutil-57.dll - swresample-4.dll - swscale-6.dll - ``` - -## Update reports - -Has been moved to [:bookmark_tabs: CHANGELOG.md](./CHANGELOG.md) - -## Version of currently used FFmpeg library - -Current FFMpeg version is `5.0`. - -| Dependency | Version | -| :-------------: | :------------: | -| `libavcodec` | `59.18.100.0` | -| `libavformat` | `59.16.100.0` | -| `libavutil` | `57.17.100.0` | -| `libswresample` | `4.3.100.0` | -| `libswscale` | `6.4.100.0` | +To learn more about `mpegCoder`, please review the [documentation][docs]. 
+[git-master]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python "master (Windows)" [git-linux]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/master-linux "master (Linux)" [exp1]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/example-client-check "check the client" [exp2]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/example-client-player "client with player" +[docs]:https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python "Documentation of mpegCoder" diff --git a/README_PYPI.md b/README_PYPI.md new file mode 100755 index 0000000..baa8d62 --- /dev/null +++ b/README_PYPI.md @@ -0,0 +1,84 @@ +# FFmpeg-Encoder-Decoder-for-Python + +This is a mpegCoder adapted from FFmpeg & Python-c-api. Using it you could get access to processing video easily. Just use it as a common module in python like this. + +```python +import mpegCoder +``` + +| Branch | Description | +| :-------------: | :-----------: | +| [`master`][git-master] | The source project of `mpegCoder`, Windows version. | +| [`master-linux`][git-linux] | The source project of `mpegCoder`, Linux version. | +| [`example-client-check`][exp1] | A testing project of the online video stream demuxing. | +| [`example-client-player`][exp2] | A testing project of the simple online video stream player. | + +## Documentation + +The documentation could be reviewed here: + +https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python + +## Brief introduction of this project + +This project is also named as "*FFmpeg-Encoder-Decoder-for-Python*". It is implemented based on [FFMpeg][link-ffmpeg], [Python-C-API][link-python-c-api] and [C++11][link-cpp11]. It is under [GPL v3 License][git-license], and recommended for researching purposes. The project could be used for **processing** or **streaming** videos. + +With this package, users could: + +* Make use of **all** FFMpeg video encoders and decoders. 
When decoding a video (or an online stream), like the original FFMpeg (C version), the provided APIs could detect the video format and codec format automatically. When encoding a video, users could control the codec format, bit rate and some other options by setting parameters. +* Work with FFMpeg directly. This project invokes the FFMpeg C APIs in the bottom level. Unlike [ffmpeg-python][git-ffmpeg-python] and [pyffmpeg][git-pyffmpeg], our project is not driven by the FFMpeg CLI interfaces. The data format used by this package is [`np.ndarray`][link-ndarray]. In other words, our project enables users to combine [numpy][link-numpy] and FFMpeg directly. +* Frame-level APIs. Unlike [pyffmpeg][git-pyffmpeg], this package is not a simple wrapper of FFMpeg. Users could works on the frame-level APIs. For example, when decoding a video, users could get the data frame-by-frame. Each frame is a 3D [`np.ndarray`][link-ndarray]. +* Pre-compiled package. This package has been pre-compiled by the author. If users download the dependent dynamic libraries (`.so` or `.dll`), they do not need to compile the package by themself. + +However, users could not work with this project in such cases: + +* Platform limited. Currently, we only support Linux and Windows. The Linux release is pre-compiled on Debian. It has been only tested in Ubuntu, Debian and Windows. In other cases, the pre-compiled library may not work. Users may need to compile the package by themselves. +* Version limited. Currently, our project works with FFMpeg `4.4`, and `5.0`. Users need to download the dependent dynamic libraries to make the package work. The `pip` version is able to download the libraries automatically. The legacy versions of this project supports FFMpeg `3.3`, `3.4.2` and `4.0`. However, the legacy built packages are not technically supported now. +* Audio not supported. Although the original FFMpeg supports both video and audio streams, our project only works on video streams. 
For example, if a video contains audio streams, our package would omit all audio frames in the bottom level. In other words, you **could not** perform audio analysis now. In the future (`v4`), we may support the audio frame analysis. +* Filters not supported. Although the original FFMpeg supports some video processing tools ([`avfilter`][link-avfilter] and [`postproc`][link-postproc]), our implementation drops these modules. Instead, we suggest that users should process the frames with [pillow][pip-pillow] or [openCV][pip-opencv]. On the other hand, our implementation still supports frame scaling and re-sampling (supported by [`swscale`][link-swscale] and [`swresample`][link-swresample]). + +## An example of the usage + +Here we show an example of transcoding a video with our decoder and encoder. To learn more details, please review the [documentation](https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python). + +```python +import mpegCoder + +d = mpegCoder.MpegDecoder() +d.setParameter(nthread=4) +opened = d.FFmpegSetup('test-video.mp4') # Setup the decoder +e = mpegCoder.MpegEncoder() +e.setParameter(decoder=d, codecName='libx265', videoPath='test-video-x265.mp4', nthread=8) # inherit most of parameters from the decoder. +opened = opened and e.FFmpegSetup() # Setup the encoder. +if opened: # If either the decoder or the encoder is not loaded successfully, do not continue. + p = True + while p is not None: + p = d.ExtractGOP() # Extract current GOP. + if p is not None: + for i in p: # Iterate every frame. + e.EncodeFrame(i) # Encode current frame. + e.FFmpegClose() # End encoding, and flush all frames in cache. +e.clear() # Clean configs of the encoder. +d.clear() # Close configs of the decoder. 
+``` + +[git-master]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python "master (Windows)" +[git-linux]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/master-linux "master (Linux)" +[exp1]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/example-client-check "check the client" +[exp2]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/tree/example-client-player "client with player" +[docs]:https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python "Documentation of mpegCoder" + +[git-ffmpeg-python]:https://github.com/kkroening/ffmpeg-python "ffmpeg-python" +[git-pyffmpeg]:https://github.com/deuteronomy-works/pyffmpeg "pyffmpeg" +[git-license]:https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/blob/master/LICENSE +[pip-pillow]:https://pypi.org/project/Pillow "Pillow" +[pip-opencv]:https://pypi.org/project/opencv-python "OpenCV Python" +[link-cpp11]:https://en.cppreference.com/w/ "C++ 11" +[link-python-c-api]:https://docs.python.org/3/c-api/index.html "Python-C-API" +[link-numpy]:https://numpy.org "numpy" +[link-ndarray]:https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html "np.ndarray" +[link-ffmpeg]:https://ffmpeg.org "FFMpeg" +[link-avfilter]:http://ffmpeg.org/doxygen/trunk/group__lavfi.html "libavfilter" +[link-postproc]:http://ffmpeg.org/doxygen/trunk/group__lpp.html "libpostproc" +[link-swscale]:http://ffmpeg.org/doxygen/trunk/group__libsws.html "libswscale" +[link-swresample]:http://ffmpeg.org/doxygen/trunk/group__lswr.html "libswresample" diff --git a/display/config-include.png b/display/config-include.png deleted file mode 100644 index dccb3ad..0000000 Binary files a/display/config-include.png and /dev/null differ diff --git a/display/config-libs.png b/display/config-libs.png deleted file mode 100644 index 546530d..0000000 Binary files a/display/config-libs.png and /dev/null differ diff --git a/display/config-linker.png b/display/config-linker.png deleted file mode 100644 
index 47426dc..0000000 Binary files a/display/config-linker.png and /dev/null differ diff --git a/mpegCoder-pypi.code-workspace b/mpegCoder-pypi.code-workspace new file mode 100755 index 0000000..3495faa --- /dev/null +++ b/mpegCoder-pypi.code-workspace @@ -0,0 +1,22 @@ +{ + "folders": [ + { + "path": "." + } + ], + "settings": { + "python.linting.flake8Args": [ + "--ignore", "E501" + ], + "python.linting.pycodestyleArgs": [ + "--ignore", "E501" + ], + "python.linting.pylintArgs": [ + "-d", "C0301" + ], + "cSpell.enabled": false, + "python.linting.enabled": true, + "python.linting.flake8Enabled": true, + "python.formatting.provider": "black" + } +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100755 index 0000000..e1a695d --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +urllib3>=1.26.2 +tqdm>=4.50.0 +setuptools>=50.3.2 diff --git a/scripts/init_linux.py b/scripts/init_linux.py new file mode 100755 index 0000000..33ed823 --- /dev/null +++ b/scripts/init_linux.py @@ -0,0 +1,136 @@ +#!/usr/python +# -*- coding: UTF8-*- # +''' +mpegCoder +--------- +* FFmpeg-Encoder-Decoder-for-Python +* This is a mpegCoder adapted from FFmpeg & Python-c-api. Using it you could get + access to processing video easily. Just use it as a common module in python like + this. +* Author: cainmagi@gmail.com +* website: https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python/ +* OS: Linux +''' + +import os +import sysconfig +import ctypes +from . 
import webtools + +__version__ = '3.2.4' +__inner_version__ = '3.2.0' +PY_VERSION = sysconfig.get_python_version() + + +def get_release_name(mpegcoder_ver='3.x', python_ver='3.6'): + '''Get the name of the mpegCoder released module.''' + python_ver = python_ver.replace('.', '') + mpegcoder_ver = mpegcoder_ver.replace('.', '_') + return 'mpegCoder_{mp_ver}_{platform}_py{py_ver}.tar.xz'.format( + mp_ver=mpegcoder_ver, + py_ver=python_ver, + platform='Linux' + ) + + +class DynamicLibLoader: + '''A loader used for loading the dependencies automatically.''' + def __init__(self, base_dir): + '''Initialization. + Arguments: + base_dir: the directory storing all lib files. + ''' + self.base_dir = base_dir + self.dependencies = dict() + + def add_dependency(self, name): + '''Add one dependency lib. + Arguments: + name: the file name of the added dynamic lib. + ''' + self.dependencies[name] = ctypes.CDLL(os.path.join( + self.base_dir, name + )) + + def add_dependencies(self, *names): + '''Add multiple dependencies. + Load a series of dependencies (dynamic libs). The loading order is the + reversed list of the argument `names`. + Arguments: + names: A sequence of loaded dependencies. Each value is a str. + ''' + names = list(names) + for name in reversed(names): + self.add_dependency(name) + + +# Check existence of the dependency +basedir = os.path.abspath(os.path.dirname(__file__)) +if not os.path.isfile(os.path.join(basedir, 'mpegCoder.so')): + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + '{0}-linux'.format(__inner_version__), + get_release_name(__inner_version__, PY_VERSION), + path=basedir, mode='auto', verbose=True, token='' + ) +if ( + (not os.path.isdir(os.path.join(basedir, 'lib'))) or # noqa: W504 + (not os.path.isfile(os.path.join(basedir, 'lib', 'libcrypto.so.1.1'))) +): # Fix a missing dependency problem caused by libssh. 
+ webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + 'deps-3.2.0', 'so-linux-ffmpeg_5_0.tar.xz', + path=basedir, mode='auto', verbose=True, token='' + ) + + +__dependencies = DynamicLibLoader(os.path.join(basedir, 'lib')) +__dependencies.add_dependencies( + 'libva-drm.so.2', 'libva.so.2', 'libdrm.so.2' +) +__dependencies.add_dependencies( + 'libva-x11.so.2', 'libvdpau.so.1', 'libXfixes.so.3', + 'libXext.so.6', 'libX11.so.6', 'libxcb.so.1', 'libXau.so.6', + 'libXdmcp.so.6', 'libbsd.so.0', 'libmd.so.0', +) +__dependencies.add_dependencies( + 'libsrt.so.1.4', 'libssh.so.4', 'libcrypto.so.1.1', +) +__dependencies.add_dependencies( + 'libopencore-amrwb.so.0', 'libogg.so.0', 'libmpg123.so.0', + 'libnuma.so.1', +) +__dependencies.add_dependencies( + 'libopenjp2.so.7', +) +__dependencies.add_dependencies( + 'libfdk-aac.so.2', 'libmp3lame.so.0', 'libopus.so.0', + 'libtheoraenc.so.1', 'libtheoradec.so.1', 'libopenmpt.so.0', + 'libvorbisenc.so.2', 'libvorbisfile.so.3', 'libvorbis.so.0', +) +__dependencies.add_dependencies( + 'libvpx.so.7', 'libdav1d.so.6', 'librav1e.so.0', + 'libSvtAv1Enc.so.1', 'libx264.so.164', 'libx265.so.199', + 'libxvidcore.so.4', +) +__dependencies.add_dependencies( + 'libavformat.so.59', 'libavcodec.so.59', 'libswresample.so.4', + 'libswscale.so.6', 'libavutil.so.57', +) + +from . 
import mpegCoder as mpegCoder_ # noqa: E402 + + +__all__ = ( + 'webtools', '__version__', + 'setGlobal', 'readme', + 'MpegDecoder', 'MpegEncoder', + 'MpegClient', 'MpegServer' +) + +setGlobal = mpegCoder_.setGlobal +readme = mpegCoder_.readme +MpegDecoder = mpegCoder_.MpegDecoder +MpegEncoder = mpegCoder_.MpegEncoder +MpegClient = mpegCoder_.MpegClient +MpegServer = mpegCoder_.MpegServer diff --git a/scripts/init_win.py b/scripts/init_win.py new file mode 100755 index 0000000..80593d8 --- /dev/null +++ b/scripts/init_win.py @@ -0,0 +1,66 @@ +#!/usr/python +# -*- coding: UTF8-*- # +''' +mpegCoder +--------- +* FFmpeg-Encoder-Decoder-for-Python +* This is a mpegCoder adapted from FFmpeg & Python-c-api. Using it you could get + access to processing video easily. Just use it as a common module in python like + this. +* Author: cainmagi@gmail.com +* website: https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python/ +* OS: Windows version +''' + +import os +import sysconfig +from . import webtools + +__version__ = '3.2.4' +__inner_version__ = '3.2.0' +PY_VERSION = sysconfig.get_python_version() + + +def get_release_name(mpegcoder_ver='3.x', python_ver='3.6'): + '''Get the name of the mpegCoder released module.''' + python_ver = python_ver.replace('.', '') + mpegcoder_ver = mpegcoder_ver.replace('.', '_') + return 'mpegCoder_{mp_ver}_{platform}_py{py_ver}.tar.xz'.format( + mp_ver=mpegcoder_ver, + py_ver=python_ver, + platform='Win' + ) + + +# Check existence of the dependency +basedir = os.path.abspath(os.path.dirname(__file__)) +# Check existence of the dependency +if not os.path.isfile(os.path.join(basedir, 'mpegCoder.pyd')): + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + __inner_version__, get_release_name(__inner_version__, PY_VERSION), + path=basedir, mode='auto', verbose=True, token='' + ) +if not os.path.isfile(os.path.join(basedir, 'avcodec-59.dll')): + webtools.download_tarball( + 'cainmagi', 
'FFmpeg-Encoder-Decoder-for-Python', + 'deps-3.2.0', 'dll-win-ffmpeg_5_0.tar.xz', + path=basedir, mode='auto', verbose=True, token='' + ) + + +__all__ = ( + 'webtools', '__version__', + 'setGlobal', 'readme', + 'MpegDecoder', 'MpegEncoder', + 'MpegClient', 'MpegServer' +) + +from . import mpegCoder as mpegCoder_ # noqa: E402 + +setGlobal = mpegCoder_.setGlobal +readme = mpegCoder_.readme +MpegDecoder = mpegCoder_.MpegDecoder +MpegEncoder = mpegCoder_.MpegEncoder +MpegClient = mpegCoder_.MpegClient +MpegServer = mpegCoder_.MpegServer diff --git a/setup.py b/setup.py new file mode 100755 index 0000000..0725ac8 --- /dev/null +++ b/setup.py @@ -0,0 +1,248 @@ +#!python +# -*- coding: UTF-8 -*- +''' +################################################################ +# Package setup file for mpegCoder +# @ FFMpeg encoder and decoder. +# Yuchen Jin @ cainmagi@gmail.com +# Requirements: (Pay attention to version) +# python 3.3+ +# urllib3 1.26.2+ +# This script is used for building the pre-compiled package +# of mpegCoder. +################################################################ +''' + +import os +import sys +import site +import pip +import sysconfig +import shutil +import atexit + +# Add a small trick for fixing the requirement issue. +pip.main(['install', '-r', 'requirements.txt']) + +import webtools # noqa: E402 + +try: + from setuptools import setup, find_packages + from setuptools.dist import Distribution + from setuptools.command.install import install +except ImportError: + from distutils.core import setup, Distribution + from distutils.command.install import install + from pkgutil import walk_packages + + def find_packages(path=('.', ), prefix=''): + '''Alternative for setuptools.find_packages + ''' + for _, name, ispkg in walk_packages(path, prefix): + if ispkg: + yield name + + +VERSION = '3.2.4' +DEPENDENCY_VERSION = '3.2.0' +PUBLISH_VERSION = '' +# PUBLISH_VERSION Should begin from '', each failed attmpt, it need to be +# changed as '-b', '-c', ... 
+ +INSTALL_REQUIRES_FILE = [ + 'numpy >= 1.16.0; python_version < "3.7.0"', + 'numpy >= 1.20.0; python_version >= "3.7.0" and python_version < "3.8.0"', + 'numpy >= 1.22.0; python_version >= "3.8.0"', + 'urllib3>=1.26.0' +] + +# Fetch the platform information and dependencies +PLATFORM_NAME = sysconfig.get_platform() +PY_VERSION = sysconfig.get_python_version() +if 'linux' in PLATFORM_NAME: + IS_LINUX = True +elif 'win' in PLATFORM_NAME: + IS_LINUX = False +else: + raise OSError('The platform {0} should not be used for ' + 'building the package.'.format(PLATFORM_NAME)) + + +def get_release_name(mpegcoder_ver='3.x', python_ver='3.6', is_linux=False): + '''Get the name of the mpegCoder released module.''' + python_ver = python_ver.replace('.', '') + mpegcoder_ver = mpegcoder_ver.replace('.', '_') + return 'mpegCoder_{mp_ver}_{platform}_py{py_ver}.tar.xz'.format( + mp_ver=mpegcoder_ver, + py_ver=python_ver, + platform='Linux' if is_linux else 'Win' + ) + + +def fetch_scripts(is_linux=False, source_path='.'): + '''Fetch dependencies, will return a list of the dependency file names.''' + package_path = os.path.join(source_path, 'mpegCoder') + init_file_name = os.path.join(package_path, '__init__.py') + os.makedirs(package_path, exist_ok=True) + if is_linux: + if not os.path.isfile(init_file_name): + shutil.copyfile( + os.path.join('.', 'scripts', 'init_linux.py'), + os.path.join('.', 'mpegCoder', '__init__.py'), + follow_symlinks=True + ) + else: + if not os.path.isfile(init_file_name): + shutil.copyfile( + os.path.join('.', 'scripts', 'init_win.py'), + os.path.join('.', 'mpegCoder', '__init__.py'), + follow_symlinks=True + ) + if not os.path.isfile(os.path.join(package_path, 'webtools.py')): + shutil.copyfile( + os.path.join('.', 'webtools.py'), + os.path.join('.', 'mpegCoder', 'webtools.py'), + follow_symlinks=True + ) + + +def fetch_dependencies(python_ver='3.6', is_linux=False, target_path='.'): + '''Fetch dependencies, will return a list of the dependency file 
names.''' + os.makedirs(target_path, exist_ok=True) + if is_linux: + if not os.path.isfile(os.path.join(target_path, 'mpegCoder.so')): + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + '{0}-linux'.format(DEPENDENCY_VERSION), + get_release_name(DEPENDENCY_VERSION, python_ver, is_linux), + path=target_path, mode='auto', verbose=True, token='' + ) + if ( + (not os.path.isdir(os.path.join(target_path, 'lib'))) or # noqa: W504 + (not os.path.isfile(os.path.join(target_path, 'lib', 'libcrypto.so.1.1'))) + ): # Fix a missing dependency problem caused by libssh. + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + 'deps-3.2.0', 'so-linux-ffmpeg_5_0.tar.xz', + path=target_path, mode='auto', verbose=True, token='' + ) + else: + if not os.path.isfile(os.path.join(target_path, 'mpegCoder.pyd')): + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + DEPENDENCY_VERSION, get_release_name(DEPENDENCY_VERSION, python_ver, is_linux), + path=target_path, mode='auto', verbose=True, token='' + ) + if not os.path.isfile(os.path.join(target_path, 'avcodec-59.dll')): + webtools.download_tarball( + 'cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', + 'deps-3.2.0', 'dll-win-ffmpeg_5_0.tar.xz', + path=target_path, mode='auto', verbose=True, token='' + ) + + +class BinaryDistribution(Distribution): + '''Distribution which always forces a binary package with platform name. + This class is used for forcing the binary to be platform specific. + ''' + def is_pure(self): + return False + + def has_ext_modules(foo): + return True + + +class PostInstallCommand(install): + '''Post-installation for installation mode. + This technique is learned from + https://stackoverflow.com/questions/20288711/post-install-script-with-python-setuptools + The following script will be run after the installation. 
+ ''' + def run(self): + def _post_install(): + def find_module_path(module_name): + if '--user' in sys.argv: + paths = (site.getusersitepackages(), ) + else: + paths = ( + sysconfig.get_paths()["purelib"], + *site.getsitepackages() + ) + for path in paths: + package_path = os.path.join(path, module_name) + if os.path.exists(package_path): + return package_path + print('No installation path found, mpegCoder may not get fully installed.', file=sys.stderr) + return None + + install_path = find_module_path('mpegCoder') + fetch_dependencies(python_ver=PY_VERSION, is_linux=IS_LINUX, target_path=install_path) + + atexit.register(_post_install) + install.run(self) + + +# Get into the current dir +os.chdir(os.path.dirname(os.path.abspath(__file__))) + +if IS_LINUX: + fetch_scripts(True) +else: + fetch_scripts(False) + + +# Fetch the long description. +with open('README_PYPI.md', 'r') as fh: + LONG_DESCRIPTION = fh.read() + + +s_obj = setup( + name='mpegCoder', + version=VERSION + PUBLISH_VERSION, + description='A FFmpeg module which could provide a class for encoding, ' + 'decoding, or streaming a video in any format.', + author='Yuchen Jin', + author_email='cainmagi@gmail.com', + url='https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python', + project_urls={ + 'Tracker': 'https://github.com/cainmagi/FFmpeg-Encoder-Decoder-for-Python/issues', + 'Documentation': 'https://cainmagi.github.io/FFmpeg-Encoder-Decoder-for-Python/', + }, + long_description=LONG_DESCRIPTION, + long_description_content_type='text/markdown', + classifiers=[ + 'Intended Audience :: Developers', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.6', + 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', + 'Operating System :: POSIX :: 
Linux', + 'Operating System :: Microsoft :: Windows', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Multimedia :: Video', + 'Topic :: Multimedia :: Video :: Conversion', + 'Topic :: Multimedia :: Video :: Display', + 'Topic :: Scientific/Engineering' + ], + keywords=[ + 'python', 'h264', 'video', 'rtsp', 'ffmpeg', 'rtmp', 'encoder', + 'numpy', 'python3', 'python3-library', 'ffmpeg-wrapper', + 'video-stream', 'python-c-api', 'rtsp-push', 'rtmp-push', + 'rtsp-player', 'rtmp-player', 'ffmpeg-encoder' + ], + cmdclass={ + 'install': PostInstallCommand, + }, + python_requires='>=3.6,<3.11', + license='GPLv3', + install_requires=INSTALL_REQUIRES_FILE, + distclass=BinaryDistribution, + platforms=[sysconfig.get_platform()], + packages=list(find_packages()), + include_package_data=False, + # package_data={'mpegCoder': PACKAGE_DATA}, +) diff --git a/webtools.py b/webtools.py old mode 100644 new mode 100755 index 13b351d..2a060c3 --- a/webtools.py +++ b/webtools.py @@ -23,9 +23,9 @@ import urllib3 try: - from tqdm import tqdm - wrapattr=tqdm.wrapattr -except ImportError: + from tqdm import tqdm # Will trigger ImporError if tqdm is not installed. + wrapattr = tqdm.wrapattr # Will trigger AttributeError is tqdm<4.40.0 is installed. +except (ImportError, AttributeError): import contextlib @contextlib.contextmanager @@ -296,10 +296,3 @@ def download_tarball(user, repo, tag, asset, path='.', mode='auto', token=None, token = get_token(token) __download_tarball_from_repo(user=user, repo=repo, tag=tag, asset=asset, path=path, mode=mode, token=token, verbose=verbose) - - -if __name__ == '__main__': - - # token = get_token(token='') - print('Get ffmpeg dependencies...') - download_tarball('cainmagi', 'FFmpeg-Encoder-Decoder-for-Python', 'deps-3.2.0', 'dep-win-ffmpeg_5_0.tar.xz', path='.', mode='auto', verbose=True, token='')