Compare commits

...

17 Commits

Author SHA1 Message Date
Derek c9db70f57d Merge pull request 'Make configurable_pipelines main' (#1) from configurable_pipelines into main
Reviewed-on: #1
2021-11-21 23:51:31 +00:00
Derek b4b7fb988b Add silly default root origin - #tmp 2021-11-21 16:50:54 -07:00
Derek 139663fe66 Fix stub origin application 2021-11-21 16:50:54 -07:00
Derek 9a5f64c478 Add eye data 2021-11-21 16:50:54 -07:00
Derek 6c617ed585 Revert "Use single manager"
This reverts commit c61d6582af0eec51f14b01006e79d5f446c9ea45.
2021-11-21 16:50:54 -07:00
Derek 3d456a2e6a Fix crash on face occlusion in holistic module 2021-11-21 16:50:54 -07:00
Derek 782e0abf9d Initial mediapipe holistic module
Still need the actual pose
2021-11-21 16:50:54 -07:00
Derek 55311aca1c Fix poor face_mesh defaults 2021-11-21 16:50:54 -07:00
Derek 429f6597e6 Initial ovtk output 2021-11-21 16:50:54 -07:00
Derek 4e70648695 Big refactor
+ Skeleton type is a little more fleshed out
+ Lots of logic taken out of the mediapipe file and moved into the types 
themselves for re-use
+ Re-arranged mediapipe transforms to their own dir
+ headlook-from-3d transform now outputs a skeleton + calculates roll 
correctly
2021-11-21 16:50:54 -07:00
Derek a3b2496edb Fix crash on bad crop rect 2021-11-21 16:50:54 -07:00
Derek 7f49ee01a9 Fix occasional crash from audio system + include dep
No idea why this is only sometimes an issue - maybe it's a sign that the 
wrong sample rate is in use somewhere and I should handle it 
differently? Idk, not a priority rn
2021-11-21 16:50:54 -07:00
Derek 8ff150af68 Use single manager
I think this reduces system load
2021-11-21 16:50:54 -07:00
Derek b54be8aa84 Fix bug where application would hang on transform error 2021-11-21 16:50:54 -07:00
Derek a83f156439 Refactor vetk->ovtk 2021-11-21 16:50:54 -07:00
Derek 73ba45f537 Refactor, add a smoothener for rect positions and allow crop borders 2021-11-21 16:50:54 -07:00
Derek 9349458543 Initial redesign for modular, configurable pipelines support
This implements most of the abilities available previously,
most notably missing pupil detection atm.
This also currently uses a completely different tracking algo,
mediapipe's landmarks module, but by the nature of this system
is in no way limited to that!
2021-11-21 16:50:50 -07:00
41 changed files with 2368 additions and 833 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
__pycache__

12
Pipfile
View File

@ -3,13 +3,17 @@ name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
[packages]
face-alignment = "~=1.3.3"
opencv-python = "~=4.5.1"
numpy = "~=1.20.2"
toml = "*"
multiprocessing-logging = "*"
mediapipe = "*"
scipy = "*"
click = "*"
numba = "*"
websockets = "*"
pyaudio = "*"
[requires]
python_version = "3.7"
python_version = "3.9"

735
Pipfile.lock generated
View File

@ -1,11 +1,11 @@
{
"_meta": {
"hash": {
"sha256": "6b6894cfcaede0f28b3ffeca50f0a0ff0b82c5047684dbe5d44a12253ce41a7f"
"sha256": "e86997450272b6429281a4677374259b2e13f9d97269def7de429b6c8f8d6e1c"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.7"
"python_version": "3.9"
},
"sources": [
{
@ -16,411 +16,480 @@
]
},
"default": {
"cycler": {
"absl-py": {
"hashes": [
"sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d",
"sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"
"sha256:72d782fbeafba66ba3e525d46bccac949b9a174dbf66233e50ece09ee688dc81",
"sha256:ea907384af023a7e681368bedb896159ab100c7db593efbbd5cde22af11270cd"
],
"version": "==0.10.0"
"version": "==0.15.0"
},
"decorator": {
"attrs": {
"hashes": [
"sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760",
"sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"
"sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
"sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
],
"version": "==4.4.2"
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==21.2.0"
},
"face-alignment": {
"click": {
"hashes": [
"sha256:cc35c1805fad5f7ea75df265a0d3b7d8252f017a105757366e2c65bd2e12b4e4"
"sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3",
"sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"
],
"index": "pypi",
"version": "==1.3.3"
"version": "==8.0.3"
},
"imageio": {
"cycler": {
"hashes": [
"sha256:3604d751f03002e8e0e7650aa71d8d9148144a87daf17cb1f3228e80747f2e6b",
"sha256:52ddbaeca2dccf53ba2d6dec5676ca7bc3b2403ef8b37f7da78b7654bb3e10f0"
"sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3",
"sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"
],
"version": "==2.9.0"
"markers": "python_version >= '3.6'",
"version": "==0.11.0"
},
"kiwisolver": {
"hashes": [
"sha256:0cd53f403202159b44528498de18f9285b04482bab2a6fc3f5dd8dbb9352e30d",
"sha256:1e1bc12fb773a7b2ffdeb8380609f4f8064777877b2225dec3da711b421fda31",
"sha256:225e2e18f271e0ed8157d7f4518ffbf99b9450fca398d561eb5c4a87d0986dd9",
"sha256:232c9e11fd7ac3a470d65cd67e4359eee155ec57e822e5220322d7b2ac84fbf0",
"sha256:31dfd2ac56edc0ff9ac295193eeaea1c0c923c0355bf948fbd99ed6018010b72",
"sha256:33449715e0101e4d34f64990352bce4095c8bf13bed1b390773fc0a7295967b3",
"sha256:401a2e9afa8588589775fe34fc22d918ae839aaaf0c0e96441c0fdbce6d8ebe6",
"sha256:44a62e24d9b01ba94ae7a4a6c3fb215dc4af1dde817e7498d901e229aaf50e4e",
"sha256:50af681a36b2a1dee1d3c169ade9fdc59207d3c31e522519181e12f1b3ba7000",
"sha256:563c649cfdef27d081c84e72a03b48ea9408c16657500c312575ae9d9f7bc1c3",
"sha256:5989db3b3b34b76c09253deeaf7fbc2707616f130e166996606c284395da3f18",
"sha256:5a7a7dbff17e66fac9142ae2ecafb719393aaee6a3768c9de2fd425c63b53e21",
"sha256:5c3e6455341008a054cccee8c5d24481bcfe1acdbc9add30aa95798e95c65621",
"sha256:5f6ccd3dd0b9739edcf407514016108e2280769c73a85b9e59aa390046dbf08b",
"sha256:72c99e39d005b793fb7d3d4e660aed6b6281b502e8c1eaf8ee8346023c8e03bc",
"sha256:78751b33595f7f9511952e7e60ce858c6d64db2e062afb325985ddbd34b5c131",
"sha256:834ee27348c4aefc20b479335fd422a2c69db55f7d9ab61721ac8cd83eb78882",
"sha256:8be8d84b7d4f2ba4ffff3665bcd0211318aa632395a1a41553250484a871d454",
"sha256:950a199911a8d94683a6b10321f9345d5a3a8433ec58b217ace979e18f16e248",
"sha256:a357fd4f15ee49b4a98b44ec23a34a95f1e00292a139d6015c11f55774ef10de",
"sha256:a53d27d0c2a0ebd07e395e56a1fbdf75ffedc4a05943daf472af163413ce9598",
"sha256:acef3d59d47dd85ecf909c359d0fd2c81ed33bdff70216d3956b463e12c38a54",
"sha256:b38694dcdac990a743aa654037ff1188c7a9801ac3ccc548d3341014bc5ca278",
"sha256:b9edd0110a77fc321ab090aaa1cfcaba1d8499850a12848b81be2222eab648f6",
"sha256:c08e95114951dc2090c4a630c2385bef681cacf12636fb0241accdc6b303fd81",
"sha256:c5518d51a0735b1e6cee1fdce66359f8d2b59c3ca85dc2b0813a8aa86818a030",
"sha256:c8fd0f1ae9d92b42854b2979024d7597685ce4ada367172ed7c09edf2cef9cb8",
"sha256:ca3820eb7f7faf7f0aa88de0e54681bddcb46e485beb844fcecbcd1c8bd01689",
"sha256:cf8b574c7b9aa060c62116d4181f3a1a4e821b2ec5cbfe3775809474113748d4",
"sha256:d3155d828dec1d43283bd24d3d3e0d9c7c350cdfcc0bd06c0ad1209c1bbc36d0",
"sha256:f8d6f8db88049a699817fd9178782867bf22283e3813064302ac59f61d95be05",
"sha256:fd34fbbfbc40628200730bc1febe30631347103fc8d3d4fa012c21ab9c11eca9"
"sha256:0007840186bacfaa0aba4466d5890334ea5938e0bb7e28078a0eb0e63b5b59d5",
"sha256:19554bd8d54cf41139f376753af1a644b63c9ca93f8f72009d50a2080f870f77",
"sha256:1d45d1c74f88b9f41062716c727f78f2a59a5476ecbe74956fafb423c5c87a76",
"sha256:1d819553730d3c2724582124aee8a03c846ec4362ded1034c16fb3ef309264e6",
"sha256:2210f28778c7d2ee13f3c2a20a3a22db889e75f4ec13a21072eabb5693801e84",
"sha256:22521219ca739654a296eea6d4367703558fba16f98688bd8ce65abff36eaa84",
"sha256:25405f88a37c5f5bcba01c6e350086d65e7465fd1caaf986333d2a045045a223",
"sha256:2b65bd35f3e06a47b5c30ea99e0c2b88f72c6476eedaf8cfbc8e66adb5479dcf",
"sha256:2ddb500a2808c100e72c075cbb00bf32e62763c82b6a882d403f01a119e3f402",
"sha256:2f8f6c8f4f1cff93ca5058d6ec5f0efda922ecb3f4c5fb76181f327decff98b8",
"sha256:30fa008c172355c7768159983a7270cb23838c4d7db73d6c0f6b60dde0d432c6",
"sha256:3dbb3cea20b4af4f49f84cffaf45dd5f88e8594d18568e0225e6ad9dec0e7967",
"sha256:4116ba9a58109ed5e4cb315bdcbff9838f3159d099ba5259c7c7fb77f8537492",
"sha256:44e6adf67577dbdfa2d9f06db9fbc5639afefdb5bf2b4dfec25c3a7fbc619536",
"sha256:5326ddfacbe51abf9469fe668944bc2e399181a2158cb5d45e1d40856b2a0589",
"sha256:70adc3658138bc77a36ce769f5f183169bc0a2906a4f61f09673f7181255ac9b",
"sha256:72be6ebb4e92520b9726d7146bc9c9b277513a57a38efcf66db0620aec0097e0",
"sha256:7843b1624d6ccca403a610d1277f7c28ad184c5aa88a1750c1a999754e65b439",
"sha256:7ba5a1041480c6e0a8b11a9544d53562abc2d19220bfa14133e0cdd9967e97af",
"sha256:80efd202108c3a4150e042b269f7c78643420cc232a0a771743bb96b742f838f",
"sha256:82f49c5a79d3839bc8f38cb5f4bfc87e15f04cbafa5fbd12fb32c941cb529cfb",
"sha256:83d2c9db5dfc537d0171e32de160461230eb14663299b7e6d18ca6dca21e4977",
"sha256:8d93a1095f83e908fc253f2fb569c2711414c0bfd451cab580466465b235b470",
"sha256:8dc3d842fa41a33fe83d9f5c66c0cc1f28756530cd89944b63b072281e852031",
"sha256:9661a04ca3c950a8ac8c47f53cbc0b530bce1b52f516a1e87b7736fec24bfff0",
"sha256:a498bcd005e8a3fedd0022bb30ee0ad92728154a8798b703f394484452550507",
"sha256:a7a4cf5bbdc861987a7745aed7a536c6405256853c94abc9f3287c3fa401b174",
"sha256:b5074fb09429f2b7bc82b6fb4be8645dcbac14e592128beeff5461dcde0af09f",
"sha256:b6a5431940f28b6de123de42f0eb47b84a073ee3c3345dc109ad550a3307dd28",
"sha256:ba677bcaff9429fd1bf01648ad0901cea56c0d068df383d5f5856d88221fe75b",
"sha256:bcadb05c3d4794eb9eee1dddf1c24215c92fb7b55a80beae7a60530a91060560",
"sha256:bf7eb45d14fc036514c09554bf983f2a72323254912ed0c3c8e697b62c4c158f",
"sha256:c358721aebd40c243894298f685a19eb0491a5c3e0b923b9f887ef1193ddf829",
"sha256:c4550a359c5157aaf8507e6820d98682872b9100ce7607f8aa070b4b8af6c298",
"sha256:c6572c2dab23c86a14e82c245473d45b4c515314f1f859e92608dcafbd2f19b8",
"sha256:cba430db673c29376135e695c6e2501c44c256a81495da849e85d1793ee975ad",
"sha256:dedc71c8eb9c5096037766390172c34fb86ef048b8e8958b4e484b9e505d66bc",
"sha256:e6f5eb2f53fac7d408a45fbcdeda7224b1cfff64919d0f95473420a931347ae9",
"sha256:ec2eba188c1906b05b9b49ae55aae4efd8150c61ba450e6721f64620c50b59eb",
"sha256:ee040a7de8d295dbd261ef2d6d3192f13e2b08ec4a954de34a6fb8ff6422e24c",
"sha256:eedd3b59190885d1ebdf6c5e0ca56828beb1949b4dfe6e5d0256a461429ac386",
"sha256:f441422bb313ab25de7b3dbfd388e790eceb76ce01a18199ec4944b369017009",
"sha256:f8eb7b6716f5b50e9c06207a14172cf2de201e41912ebe732846c02c830455b9",
"sha256:fc4453705b81d03568d5b808ad8f09c77c47534f6ac2e72e733f9ca4714aa75c"
],
"markers": "python_version >= '3.6'",
"version": "==1.3.1"
"markers": "python_version >= '3.7'",
"version": "==1.3.2"
},
"llvmlite": {
"hashes": [
"sha256:048a7c117641c9be87b90005684e64a6f33ea0897ebab1df8a01214a10d6e79a",
"sha256:05f807209a360d39526d98141b6f281b9c7c771c77a4d1fc22002440642c8de2",
"sha256:1ce5bc0a638d874a08d4222be0a7e48e5df305d094c2ff8dec525ef32b581551",
"sha256:1dee416ea49fd338c74ec15c0c013e5273b0961528169af06ff90772614f7f6c",
"sha256:3b17fc4b0dd17bd29d7297d054e2915fad535889907c3f65232ee21f483447c5",
"sha256:50b1828bde514b31431b2bba1aa20b387f5625b81ad6e12fede430a04645e47a",
"sha256:5a6548b4899facb182145147185e9166c69826fb424895f227e6b7cf924a8da1",
"sha256:6a3abc8a8889aeb06bf9c4a7e5df5bc7bb1aa0aedd91a599813809abeec80b5a",
"sha256:705f0323d931684428bb3451549603299bb5e17dd60fb979d67c3807de0debc1",
"sha256:765128fdf5f149ed0b889ffbe2b05eb1717f8e20a5c87fa2b4018fbcce0fcfc9",
"sha256:7768658646c418b9b3beccb7044277a608bc8c62b82a85e73c7e5c065e4157c2",
"sha256:7c4e7066447305d5095d0b0a9cae7b835d2f0fde143456b3124110eab0856426",
"sha256:7db4b0eef93125af1c4092c64a3c73c7dc904101117ef53f8d78a1a499b8d5f4",
"sha256:9dad7e4bb042492914292aea3f4172eca84db731f9478250240955aedba95e08",
"sha256:b3a77e46e6053e2a86e607e87b97651dda81e619febb914824a927bff4e88737",
"sha256:cc0f9b9644b4ab0e4a5edb17f1531d791630c88858220d3cc688d6edf10da100",
"sha256:d1fdd63c371626c25ad834e1c6297eb76cf2f093a40dbb401a87b6476ab4e34e",
"sha256:dbedff0f6d417b374253a6bab39aa4b5364f1caab30c06ba8726904776fcf1cb",
"sha256:f608bae781b2d343e15e080c546468c5a6f35f57f0446923ea198dd21f23757e",
"sha256:f7918dbac02b1ebbfd7302ad8e8307d7877ab57d782d5f04b70ff9696b53c21b",
"sha256:ff52fb9c2be66b95b0e67d56fce11038397e5be1ea410ee53f5f1175fdbb107a"
"sha256:14030a1c0f9aee0185db069163240c51d4e8a3eec0daf02468e057281dee612b",
"sha256:15b8ac7a489e31b7d5c482193edaa44374e3c18e409bea494224e31eb60e38e5",
"sha256:30431fe9a9b7b1c3585b71149cc11dc79b9d62dc86d3db15c3dcca33d274b5be",
"sha256:447b01c25d18921c4179f2eccba218d7c82b65cfe3952b0d018d569945427bf9",
"sha256:4616e17914fcc7c5bfb7d1014acbd4fca478949820e86218a29d9473d0aa221b",
"sha256:4c1e91fd4ba2764161e9a05b6fff46a52d26170186bad99629777e8c7246f0ef",
"sha256:57c1dae337863b497c141d40736041d4acb7769226b44fe05959fce3c3570d5d",
"sha256:6392b870cd018ec0c645d6bbb918d6aa0eeca8c62674baaee30862d6b6865b15",
"sha256:7449acca596f45e9e12b20c0b72d184f83025341cc2d44d7ccf5fe31356dcd08",
"sha256:74b6c3d2eb8cef32a09e8fd7637d0d37628c74f4deedf9361e0c0ebecc239208",
"sha256:794191922ac6414c55d66058eaba8b88a630c6e9f2cf0db7e8e661e74d71fa14",
"sha256:995c1a2c8b6a11a7f2c66e52576de6a28292d37842d383aae5be7b965b56d10f",
"sha256:ab00b7996e5ef795f59d95d3125850f3af28d19e43bdc08473947cb8045ce098",
"sha256:b6466d6369051e5c083b15cf285c00595ddb7f828be1ebecb1dfb97f3fab0bff",
"sha256:c92a209439fd0b8a41f6e2aba1d3afa260357028a29ed7db8c602c4d67c21540",
"sha256:cf7d623d33d24df51adc4e9e9f5734df752330661793d1662425ad2e926cb2d4",
"sha256:d31a3bd69894b31bbc68df00e0b37b0980a0cf025f9dbea9cdd37988230c33a3",
"sha256:df1d1b162a426480b37d6c4adeddff49e2fb9f71b307c7facac67bdce4767746",
"sha256:f9e84d683943c2f636b08db9b2d182d4b40b83e1a3e31e100af3bb9ed8d94bcd"
],
"markers": "python_version < '3.10' and python_version >= '3.6'",
"version": "==0.36.0"
"markers": "python_version < '3.10' and python_version >= '3.7'",
"version": "==0.37.0"
},
"matplotlib": {
"hashes": [
"sha256:1f83a32e4b6045191f9d34e4dc68c0a17c870b57ef9cca518e516da591246e79",
"sha256:2eee37340ca1b353e0a43a33da79d0cd4bcb087064a0c3c3d1329cdea8fbc6f3",
"sha256:53ceb12ef44f8982b45adc7a0889a7e2df1d758e8b360f460e435abe8a8cd658",
"sha256:574306171b84cd6854c83dc87bc353cacc0f60184149fb00c9ea871eca8c1ecb",
"sha256:7561fd541477d41f3aa09457c434dd1f7604f3bd26d7858d52018f5dfe1c06d1",
"sha256:7a54efd6fcad9cb3cd5ef2064b5a3eeb0b63c99f26c346bdcf66e7c98294d7cc",
"sha256:7f16660edf9a8bcc0f766f51c9e1b9d2dc6ceff6bf636d2dbd8eb925d5832dfd",
"sha256:81e6fe8b18ef5be67f40a1d4f07d5a4ed21d3878530193898449ddef7793952f",
"sha256:84a10e462120aa7d9eb6186b50917ed5a6286ee61157bfc17c5b47987d1a9068",
"sha256:84d4c4f650f356678a5d658a43ca21a41fca13f9b8b00169c0b76e6a6a948908",
"sha256:86dc94e44403fa0f2b1dd76c9794d66a34e821361962fe7c4e078746362e3b14",
"sha256:90dbc007f6389bcfd9ef4fe5d4c78c8d2efe4e0ebefd48b4f221cdfed5672be2",
"sha256:9f374961a3996c2d1b41ba3145462c3708a89759e604112073ed6c8bdf9f622f",
"sha256:a18cc1ab4a35b845cf33b7880c979f5c609fd26c2d6e74ddfacb73dcc60dd956",
"sha256:a97781453ac79409ddf455fccf344860719d95142f9c334f2a8f3fff049ffec3",
"sha256:a989022f89cda417f82dbf65e0a830832afd8af743d05d1414fb49549287ff04",
"sha256:ac2a30a09984c2719f112a574b6543ccb82d020fd1b23b4d55bf4759ba8dd8f5",
"sha256:be4430b33b25e127fc4ea239cc386389de420be4d63e71d5359c20b562951ce1",
"sha256:c45e7bf89ea33a2adaef34774df4e692c7436a18a48bcb0e47a53e698a39fa39"
"sha256:01c9de93a2ca0d128c9064f23709362e7fefb34910c7c9e0b8ab0de8258d5eda",
"sha256:41b6e307458988891fcdea2d8ecf84a8c92d53f84190aa32da65f9505546e684",
"sha256:48e1e0859b54d5f2e29bb78ca179fd59b971c6ceb29977fb52735bfd280eb0f5",
"sha256:54a026055d5f8614f184e588f6e29064019a0aa8448450214c0b60926d62d919",
"sha256:556965514b259204637c360d213de28d43a1f4aed1eca15596ce83f768c5a56f",
"sha256:5c988bb43414c7c2b0a31bd5187b4d27fd625c080371b463a6d422047df78913",
"sha256:6a724e3a48a54b8b6e7c4ae38cd3d07084508fa47c410c8757e9db9791421838",
"sha256:6be8df61b1626e1a142c57e065405e869e9429b4a6dab4a324757d0dc4d42235",
"sha256:844a7b0233e4ff7fba57e90b8799edaa40b9e31e300b8d5efc350937fa8b1bea",
"sha256:85f0c9cf724715e75243a7b3087cf4a3de056b55e05d4d76cc58d610d62894f3",
"sha256:a78a3b51f29448c7f4d4575e561f6b0dbb8d01c13c2046ab6c5220eb25c06506",
"sha256:b884715a59fec9ad3b6048ecf3860f3b2ce965e676ef52593d6fa29abcf7d330",
"sha256:b8b53f336a4688cfce615887505d7e41fd79b3594bf21dd300531a4f5b4f746a",
"sha256:c70b6311dda3e27672f1bf48851a0de816d1ca6aaf3d49365fbdd8e959b33d2b",
"sha256:ebfb01a65c3f5d53a8c2a8133fec2b5221281c053d944ae81ff5822a68266617",
"sha256:eeb1859efe7754b1460e1d4991bbd4a60a56f366bc422ef3a9c5ae05f0bc70b5",
"sha256:f15edcb0629a0801738925fe27070480f446fcaa15de65946ff946ad99a59a40",
"sha256:f1c5efc278d996af8a251b2ce0b07bbeccb821f25c8c9846bdcb00ffc7f158aa",
"sha256:f72657f1596199dc1e4e7a10f52a4784ead8a711f4e5b59bea95bdb97cf0e4fd",
"sha256:fc4f526dfdb31c9bd6b8ca06bf9fab663ca12f3ec9cdf4496fb44bc680140318",
"sha256:fcd6f1954943c0c192bfbebbac263f839d7055409f1173f80d8b11a224d236da"
],
"markers": "python_version >= '3.7'",
"version": "==3.4.1"
"version": "==3.4.3"
},
"networkx": {
"mediapipe": {
"hashes": [
"sha256:0635858ed7e989f4c574c2328380b452df892ae85084144c73d8cd819f0c4e06",
"sha256:109cd585cac41297f71103c3c42ac6ef7379f29788eb54cb751be5a663bb235a"
"sha256:1b88163adfa9f4a70b56803ed960435f0907688047d3b930156a1d35a38cf965",
"sha256:1b8e2940deee821adaaf405ebf20db57691c1beed16f8ab051246cbc4426a681",
"sha256:292e193f819cc8b9dbfa79d2013b8406dd3b51b4b7dcb3d130e9f3bb5ed7c417",
"sha256:4607d6ea19bd6373c325850e235e889ea5a429638467a1a7e13d9fb68e7720b1",
"sha256:4d431101dcf84aa09f3fd4160c338fb77fd62dd9a59ad7098f2e32a2e6323249",
"sha256:69cff4565b4065c44a6a42bba8a9b1c8d7f7d60e0e5e8695eb3c68e71008e93e",
"sha256:899f686f323ceeeb4a5ea359b08f572342b9fc5672b0fb2cdad1cee03b1d8609",
"sha256:aa8c49aed1b1ab7c0ffb1c2ae603dd8da0e03f14c75db9b339fd2ddd110f0633",
"sha256:cb84cbc890ab2ada6d0d7530725ede968b517485627afc9155a18fe3a98736d6"
],
"markers": "python_version >= '3.6'",
"version": "==2.5.1"
"index": "pypi",
"version": "==0.8.9"
},
"multiprocessing-logging": {
"hashes": [
"sha256:9d3eb0f1f859b7ba6250a029726f77a7701999deda939595122d8748751de2e3"
],
"index": "pypi",
"version": "==0.3.1"
},
"numba": {
"hashes": [
"sha256:0ef9d1f347b251282ae46e5a5033600aa2d0dfa1ee8c16cb8137b8cd6f79e221",
"sha256:17146885cbe4e89c9d4abd4fcb8886dee06d4591943dc4343500c36ce2fcfa69",
"sha256:1895ebd256819ff22256cd6fe24aa8f7470b18acc73e7917e8e93c9ac7f565dc",
"sha256:224d197a46a9e602a16780d87636e199e2cdef528caef084a4d8fd8909c2455c",
"sha256:276f9d1674fe08d95872d81b97267c6b39dd830f05eb992608cbede50fcf48a9",
"sha256:2e96958ed2ca7e6d967b2ce29c8da0ca47117e1de28e7c30b2c8c57386506fa5",
"sha256:4c4c8d102512ae472af52c76ad9522da718c392cb59f4cd6785d711fa5051a2a",
"sha256:5165709bf62f28667e10b9afe6df0ce1037722adab92d620f59cb8bbb8104641",
"sha256:6545b9e9b0c112b81de7f88a3c787469a357eeff8211e90b8f45ee243d521cc2",
"sha256:691adbeac17dbdf6ed7c759e9e33a522351f07d2065fe926b264b6b2c15fd89b",
"sha256:74df02e73155f669e60dcff07c4eef4a03dbf5b388594db74142ab40914fe4f5",
"sha256:8fa5c963a43855050a868106a87cd614f3c3f459951c8fc468aec263ef80d063",
"sha256:94aab3e0e9e8754116325ce026e1b29ae72443c706a3104cf7f3368dc3012912",
"sha256:9cd4e5216acdc66c4e9dab2dfd22ddb5bef151185c070d4a3cd8e78638aff5b0",
"sha256:aaa6ebf56afb0b6752607b9f3bf39e99b0efe3c1fa6849698373925ee6838fd7",
"sha256:aabeec89bb3e3162136eea492cea7ee8882ddcda2201f05caecdece192c40896",
"sha256:aba7acb247a09d7f12bd17a8e28bbb04e8adef9fc20ca29835d03b7894e1b49f",
"sha256:b08b3df38aab769df79ed948d70f0a54a3cdda49d58af65369235c204ec5d0f3",
"sha256:b23de6b6837c132087d06b8b92d343edb54b885873b824a037967fbd5272ebb7",
"sha256:bd126f1f49da6fc4b3169cf1d96f1c3b3f84a7badd11fe22da344b923a00e744",
"sha256:bf5c463b62d013e3f709cc8277adf2f4f4d8cc6757293e29c6db121b77e6b760"
"sha256:0354df1fcfa9d9d8df3b63780fae408c8f23c474d71a4e929f4c5b44f2c9ce5a",
"sha256:0f1c2c23c4e05cbed19f7a15710a25e71ab818ba7cd0bf66572bacd221721f22",
"sha256:1380429f4a3f73440aae093a058713c780fdc14930b3070c883bc1737e8711b0",
"sha256:5239bf413a9d3c7fad839400d5082032635511c3b7058e17835c7c4090f223ed",
"sha256:5492ffa42425b7dc783e4376dfc07617c751d7d087d64fe8c2e7944038e35261",
"sha256:606ebf5b0474d89f96a2e1354f0349e985c3897c2989b78e47b095d67434cf4c",
"sha256:64451b4fd2437ebb7bbcff72133b28575cb8464eb3f10ccd88c70a3792e6de0a",
"sha256:77479e79b6840e3eb5e0613bbdbb4be8f4b9c4130bafdf6ac39b9507ea742f15",
"sha256:7da918aed4790a4ce6682061971e6248e7422dd5618dcac8054d4a47955182dc",
"sha256:884ad2cdebb6f8bcc7b5ec70e56c9acdb8456482c49cea12273d34709dfc2c9c",
"sha256:b385451355a9023c9611400c7c6d4088f5781ed11b104b5d690f0ad65b142860",
"sha256:b657cece0b069cd4361a6d25aaae2e9e9df9e65abfa63f09345352fbb1069a11",
"sha256:c2e877a33f6920365e96ad088023f786a4b1ce44a7e772763cc02c55f49614dd",
"sha256:c36e50271146c3c33f10111488307a6aa75416aa53384709b037599426a967ea",
"sha256:d0799e7e8640a31d9567a032a6e046d797356afb3e812e0a0f97e6e74ded7e35",
"sha256:ec7033409e66158e9f2b83c22d887fda7949bf2ac652bbbdcbc006b590c37339",
"sha256:ef4d27ee039007510c3de9c42fd6bb57051661ceeca4a9a6244b642a742632a0",
"sha256:f9dfc803c864edcc2381219b800abf366793400aea55e26d4d5b7d953e14f43f",
"sha256:fe4f0c881dbaac0c818dafc80e348edf8d8f1022278c368390ca20e92ed381cc"
],
"markers": "python_version < '3.10' and python_version >= '3.6'",
"version": "==0.53.1"
"index": "pypi",
"version": "==0.54.1"
},
"numpy": {
"hashes": [
"sha256:2428b109306075d89d21135bdd6b785f132a1f5a3260c371cee1fae427e12727",
"sha256:377751954da04d4a6950191b20539066b4e19e3b559d4695399c5e8e3e683bf6",
"sha256:4703b9e937df83f5b6b7447ca5912b5f5f297aba45f91dbbbc63ff9278c7aa98",
"sha256:471c0571d0895c68da309dacee4e95a0811d0a9f9f532a48dc1bea5f3b7ad2b7",
"sha256:61d5b4cf73622e4d0c6b83408a16631b670fc045afd6540679aa35591a17fe6d",
"sha256:6c915ee7dba1071554e70a3664a839fbc033e1d6528199d4621eeaaa5487ccd2",
"sha256:6e51e417d9ae2e7848314994e6fc3832c9d426abce9328cf7571eefceb43e6c9",
"sha256:719656636c48be22c23641859ff2419b27b6bdf844b36a2447cb39caceb00935",
"sha256:780ae5284cb770ade51d4b4a7dce4faa554eb1d88a56d0e8b9f35fca9b0270ff",
"sha256:878922bf5ad7550aa044aa9301d417e2d3ae50f0f577de92051d739ac6096cee",
"sha256:924dc3f83de20437de95a73516f36e09918e9c9c18d5eac520062c49191025fb",
"sha256:97ce8b8ace7d3b9288d88177e66ee75480fb79b9cf745e91ecfe65d91a856042",
"sha256:9c0fab855ae790ca74b27e55240fe4f2a36a364a3f1ebcfd1fb5ac4088f1cec3",
"sha256:9cab23439eb1ebfed1aaec9cd42b7dc50fc96d5cd3147da348d9161f0501ada5",
"sha256:a8e6859913ec8eeef3dbe9aed3bf475347642d1cdd6217c30f28dee8903528e6",
"sha256:aa046527c04688af680217fffac61eec2350ef3f3d7320c07fd33f5c6e7b4d5f",
"sha256:abc81829c4039e7e4c30f7897938fa5d4916a09c2c7eb9b244b7a35ddc9656f4",
"sha256:bad70051de2c50b1a6259a6df1daaafe8c480ca98132da98976d8591c412e737",
"sha256:c73a7975d77f15f7f68dacfb2bca3d3f479f158313642e8ea9058eea06637931",
"sha256:d15007f857d6995db15195217afdbddfcd203dfaa0ba6878a2f580eaf810ecd6",
"sha256:d76061ae5cab49b83a8cf3feacefc2053fac672728802ac137dd8c4123397677",
"sha256:e8e4fbbb7e7634f263c5b0150a629342cc19b47c5eba8d1cd4363ab3455ab576",
"sha256:e9459f40244bb02b2f14f6af0cd0732791d72232bbb0dc4bab57ef88e75f6935",
"sha256:edb1f041a9146dcf02cd7df7187db46ab524b9af2515f392f337c7cbbf5b52cd"
"sha256:1676b0a292dd3c99e49305a16d7a9f42a4ab60ec522eac0d3dd20cdf362ac010",
"sha256:16f221035e8bd19b9dc9a57159e38d2dd060b48e93e1d843c49cb370b0f415fd",
"sha256:43909c8bb289c382170e0282158a38cf306a8ad2ff6dfadc447e90f9961bef43",
"sha256:4e465afc3b96dbc80cf4a5273e5e2b1e3451286361b4af70ce1adb2984d392f9",
"sha256:55b745fca0a5ab738647d0e4db099bd0a23279c32b31a783ad2ccea729e632df",
"sha256:5d050e1e4bc9ddb8656d7b4f414557720ddcca23a5b88dd7cff65e847864c400",
"sha256:637d827248f447e63585ca3f4a7d2dfaa882e094df6cfa177cc9cf9cd6cdf6d2",
"sha256:6690080810f77485667bfbff4f69d717c3be25e5b11bb2073e76bb3f578d99b4",
"sha256:66fbc6fed94a13b9801fb70b96ff30605ab0a123e775a5e7a26938b717c5d71a",
"sha256:67d44acb72c31a97a3d5d33d103ab06d8ac20770e1c5ad81bdb3f0c086a56cf6",
"sha256:6ca2b85a5997dabc38301a22ee43c82adcb53ff660b89ee88dded6b33687e1d8",
"sha256:6e51534e78d14b4a009a062641f465cfaba4fdcb046c3ac0b1f61dd97c861b1b",
"sha256:70eb5808127284c4e5c9e836208e09d685a7978b6a216db85960b1a112eeace8",
"sha256:830b044f4e64a76ba71448fce6e604c0fc47a0e54d8f6467be23749ac2cbd2fb",
"sha256:8b7bb4b9280da3b2856cb1fc425932f46fba609819ee1c62256f61799e6a51d2",
"sha256:a9c65473ebc342715cb2d7926ff1e202c26376c0dcaaee85a1fd4b8d8c1d3b2f",
"sha256:c1c09247ccea742525bdb5f4b5ceeacb34f95731647fe55774aa36557dbb5fa4",
"sha256:c5bf0e132acf7557fc9bb8ded8b53bbbbea8892f3c9a1738205878ca9434206a",
"sha256:db250fd3e90117e0312b611574cd1b3f78bec046783195075cbd7ba9c3d73f16",
"sha256:e515c9a93aebe27166ec9593411c58494fa98e5fcc219e47260d9ab8a1cc7f9f",
"sha256:e55185e51b18d788e49fe8305fd73ef4470596b33fc2c1ceb304566b99c71a69",
"sha256:ea9cff01e75a956dbee133fa8e5b68f2f92175233de2f88de3a682dd94deda65",
"sha256:f1452578d0516283c87608a5a5548b0cdde15b99650efdfd85182102ef7a7c17",
"sha256:f39a995e47cb8649673cfa0579fbdd1cdd33ea497d1728a6cb194d6252268e48"
],
"index": "pypi",
"version": "==1.20.2"
"version": "==1.20.3"
},
"opencv-contrib-python": {
"hashes": [
"sha256:03da9db503a6d3efbbe9571afde51a266ca6a81bd37830998bebea067cda868a",
"sha256:12267dce9c1bad35917cedb58e342391cc7e96279b2bfd964e347e390f317793",
"sha256:14a9f0b7af354c076c89b3e07b69f3016dd4565173a827f2734a790131635118",
"sha256:15245b8ad631883ad2f21dacc673a0e9d3ea95f22d9cc780206de7997e3e99b8",
"sha256:17553cb3c63735fca76df1794fb5a4201d2eac17b9fba5870cc12b0554f3ba94",
"sha256:1c57ece65b04a362c5bca2d32d9da46141c69d986e4c506546804d626d362f6b",
"sha256:24dc02cf7b7df9172382d0a1277c923aa4c07264d3c08c74166f595d32220514",
"sha256:25b772fa1bb2b66782032aa4ae4dd3deab6ecc7b9694786f3a6266f28b4a19ef",
"sha256:2e6f82af06d4b23b88ffe1514831044f3e675b8495a173043c5ab95a6dfa8830",
"sha256:31936b719bbfe53e31fb48bd9ef6a69fa3314dcbce2356bab927c26be0d21592",
"sha256:332e3e946d718759f44517a1ce208dc26929b052a3fc381fe5051213b674c413",
"sha256:4e8b8dc303c4ce3d9f7566e6b0481d2d741549f1f1fb2117ce0e1a84ecf806da",
"sha256:64ecced839fa187311d5952d47f43ce1df5d4e7c8ec3825c1ecfb1ad8ca211e4",
"sha256:66f9540a39e8cccded81432f4b4c497a874834957682d9ce858d119a4eecb8ce",
"sha256:688fcdbca5e2ffff6183b85d4020aeb1f8f20fbccbcb3690095b6ef6ce1a2f9d",
"sha256:6f4d63e22fa840ceeb1791bdbdfba675b569baea1790c19d91b45fc49653e55c",
"sha256:73762774eff15eeea2be50ac495096bd8914b06e68d2dfc0842615512ad987c9",
"sha256:768ba09db49e9257a8d99ddfc7b5398e894b3165e2490b0534f9ab0d2d52f275",
"sha256:781c2a34bc2a33dbbe4bbd6305a58e944f3589afbcfe1d9573355f1ea0c9f3ad",
"sha256:7f155587dfa46fcd2cdae2dce5ba502d62e6b41597389fbf0e3ab4024ed3e837",
"sha256:934d778fc8c6f4d0c099f7912fea00027af279a6fd6cfeec91187f7f84be5f85",
"sha256:982e2640bd56fc9785ded175f3c9b451e5c7db05dc7995f13907682ade622ba7",
"sha256:999928cb489f4d26a91ae7359774d334ecbe96476b25b71381ba5c381ccb4af6",
"sha256:9f6baabe9db9e1a02923bb2405d7cc0e1a26bb7c53bb4802474422486f96becc",
"sha256:a80de83d6165c29efbf51da7695a4fb4da14230ea261497d3ac82e13026c3549",
"sha256:cbd416c4f097821d8e51a42fba34d97e5697b4877d1718dee4ca9369dbc45adf",
"sha256:ceaf476cb7affec857097bd811d12be7677f259e61dcbce4d5bb455524c9db46",
"sha256:e51959db5b1b2a7fa657150f256509baa279a2570818832aa0c6553ad1d83539",
"sha256:f202b4967c99edbf1844636975b58ca3148019cf01343098b72b55ad6aef2bfd",
"sha256:fb4e7d273f76c01ed36a83d2be0ee2e17f1aab0b2526303c8fd5b617eed8016a"
],
"markers": "python_version >= '3.6'",
"version": "==4.5.4.58"
},
"opencv-python": {
"hashes": [
"sha256:30edebc81b260bcfeb760b3600c367c5261dfb2fe41e5d1408d5357d0867b40d",
"sha256:32dee1c9fd3e31e28edef7b56f868e2b40e280b7062304f9fb8a14dbc51547d5",
"sha256:4982fa8ccc38310a2bd93e06334ba090b12b6aff2f6fcb8ff9613e3c9bc48f48",
"sha256:5172cb37dfd8a0b4945b071a493eb36e5f17675a160637fa380f9c1d9d80535c",
"sha256:6d8434a45e8f75c4da5fd0068ce001f4f8e35771cc851d746d4721eeaf517e25",
"sha256:78a6db8467639383caedf1d111da3510a4ee1a0aacf2117821cae2ee8f92ce37",
"sha256:9646875c501788b1b098f282d777b667d6da69801739504f1b2fd1268970d1da",
"sha256:9c77d508e6822f1f40c727d21b822d017622d8305dce7eccf0ab06caac16d5c6",
"sha256:a1dfa0486db367594510c0c799ec7481247dc86e651b69008806d875ab731471",
"sha256:b2b9ac86aec5f2dd531545cebdea1a1ef4f81ef1fb1760d78b4725f9575504f9",
"sha256:bcb27773cfd5340b2b599b303d9f5499838ef4780c20c038f6030175408c64df",
"sha256:c0503bfaa2b7b743d6ff5d81f1dd8428dbf4c33e7e4f836456d11be20c2e7721",
"sha256:c1159d91f29a85c3333edef6ca420284566d9bcdae46dda2fe7282515b48c8b6",
"sha256:c4ea4f8b217f3e8be6247fc0787fb81797d85202c722523f41070124a7a621c7",
"sha256:c8cc1f5ff3c352ebe756119014c4e4ec7ae5ac536d1f66b0316667ced37637c8",
"sha256:d16144c435b816c5536d5ff012c1a2b7e93155017db7103942ff7efb98c4df1f",
"sha256:d8aefcb30b71064dbbaa2b0ace161a36464c29375a83998fbda39a1d1740f942",
"sha256:e27d062fa1098d90f48b6c047351c89816492a08906a021c973ce510b04a7b9d",
"sha256:e2c17714da59d9d516ceef0450766ff9557ee232d62f702665af905193557582",
"sha256:e38fbd7b2db03204ec09930609b7313d6b6d2b271c8fe2c0aa271fa69b726a1b",
"sha256:e77d0feaff37326f62b127098264e2a7099deb476e38432b1083ce11cdedf560",
"sha256:ebe83901971a6755512424c4fe9f63341cca501b7c497bf608dd38ee31ba3f4c",
"sha256:efac9893d9e21cfb599828801c755ecde8f1e657f05ec6f002efe19422456d5a",
"sha256:fc1472b825d26c8a4f1cfb172a90c3cc47733e4af7522276c1c2efe8f6006a8b",
"sha256:ffc75c614b8dc3d8102f3ba15dafd6ec0400c7ffa71a91953d41511964ee50e0"
"sha256:02872e0a9358526646d691f390143e9c21109c210095314abaa0641211cda077",
"sha256:085c5fcf5a6479c34aca3fd0f59055e704083d6a44009d6583c675ff1a5a0625",
"sha256:0d6249a49122a78afc6685ddb1377a87e46414ae61c84535c4c6024397f1f3e8",
"sha256:0eba0bfe62c48a02a5af3a0944e872c99f57f98653bed14d51c6991a58f9e1d1",
"sha256:215bdf069847d4e3b0447a34e9eb4046dd4ca523d41fe4381c1c55f6704fd0dc",
"sha256:22bcc3153a7d4f95aff79457eef81ef5e40ab1851b189e014412b5e9fbee2573",
"sha256:26feeeb280de179f5dbb8976ebf7ceb836bd263973cb5daec8ca36e8ef7b5773",
"sha256:2fff48a641a74d1def31c1e88f9e5ce50ba4d0f87d085dfbf8bc844e12f6cd54",
"sha256:315c357522b6310ef7a0718d9f0c5d3110e59c19140705499a3c29bdd8c0124f",
"sha256:48288428f407bacba5f73d460feb4a1ecafe87db3d7cfc0730a49fb32f589bbf",
"sha256:4a13381bdfc0fb4b080efcc27c46561d0bd752f126226e9f19aa9cbcf6677f40",
"sha256:4abe9c4fb6fe16daa9fcdd68b5357d3530431341aa655203f8e84f394e1fe6d4",
"sha256:4b614fbd81aeda53ce28e645aaee18fda7c7f2a48eb7f1a70a7c6c3427946342",
"sha256:5370a11757fbe94b176771269aff599f4da8676c2a672b13bcbca043f2e3eea8",
"sha256:637f4d3ad81bd27f273ede4c5fa6c26afb85c097c9715baf107cc270e37f5fea",
"sha256:69a78e40a374ac14e4bf15a13dbb6c30fd2fbd5fcd3674d020a31b88861d5aaf",
"sha256:6b87bab220d17e03eeedbcc6652d9d7e7bb09886dbd0f810310697a948b4c6fd",
"sha256:887a61097092dc0bf23fa24646dbc8cfeeb753649cb28a3782a93a6879e3b7d2",
"sha256:8ddf4dcd8199209e33f21deb0c6d8ab62b21802816bba895fefc346b6d2e522d",
"sha256:8f7886acabaebf0361bd3dbccaa0d08e3f65ab13b7c739eb11e028f01ad13582",
"sha256:92e9b2261ec764229c948d77fe0d922ee033348ca6519939b87861016c1614b3",
"sha256:9998ce60884f3cda074f02b56d2b57ee6bd863e2ddba132da2b0af3b9487d584",
"sha256:9b2c198af083a693d42a82bddc4d1f7e6bb02c64192ff7fac1fd1d43a8cf1be6",
"sha256:9bcca50c5444b5cfb01624666b69f91ba8f2d2bf4ef37b111697aafdeb81c99f",
"sha256:a2a7f09b8843b85f3e1b02c5ea3ddc0cb9f5ad9698380109b37069ee8db7746d",
"sha256:ac852fcaac93439f2f7116ddffdc23fd366c872200ade2272446f9898180cecb",
"sha256:bc34cdbfbab463750713118c8259a5d364547adab8ed91e94ba888349f33590a",
"sha256:c44f5c51e92322ed832607204249c190764dec6cf29e8ba6d679b10326be1c1b",
"sha256:d4b1d0b98ee72ba5dd720166790fc93ce459281e138ee79b0d41420b3da52b2e",
"sha256:eaa144013b597e4dcabc8d8230edfe810319de01b5609556d415a20e2b707547"
],
"index": "pypi",
"version": "==4.5.1.48"
"version": "==4.5.4.58"
},
"pillow": {
"hashes": [
"sha256:01425106e4e8cee195a411f729cff2a7d61813b0b11737c12bd5991f5f14bcd5",
"sha256:031a6c88c77d08aab84fecc05c3cde8414cd6f8406f4d2b16fed1e97634cc8a4",
"sha256:083781abd261bdabf090ad07bb69f8f5599943ddb539d64497ed021b2a67e5a9",
"sha256:0d19d70ee7c2ba97631bae1e7d4725cdb2ecf238178096e8c82ee481e189168a",
"sha256:0e04d61f0064b545b989126197930807c86bcbd4534d39168f4aa5fda39bb8f9",
"sha256:12e5e7471f9b637762453da74e390e56cc43e486a88289995c1f4c1dc0bfe727",
"sha256:22fd0f42ad15dfdde6c581347eaa4adb9a6fc4b865f90b23378aa7914895e120",
"sha256:238c197fc275b475e87c1453b05b467d2d02c2915fdfdd4af126145ff2e4610c",
"sha256:3b570f84a6161cf8865c4e08adf629441f56e32f180f7aa4ccbd2e0a5a02cba2",
"sha256:463822e2f0d81459e113372a168f2ff59723e78528f91f0bd25680ac185cf797",
"sha256:4d98abdd6b1e3bf1a1cbb14c3895226816e666749ac040c4e2554231068c639b",
"sha256:5afe6b237a0b81bd54b53f835a153770802f164c5570bab5e005aad693dab87f",
"sha256:5b70110acb39f3aff6b74cf09bb4169b167e2660dabc304c1e25b6555fa781ef",
"sha256:5cbf3e3b1014dddc45496e8cf38b9f099c95a326275885199f427825c6522232",
"sha256:624b977355cde8b065f6d51b98497d6cd5fbdd4f36405f7a8790e3376125e2bb",
"sha256:63728564c1410d99e6d1ae8e3b810fe012bc440952168af0a2877e8ff5ab96b9",
"sha256:66cc56579fd91f517290ab02c51e3a80f581aba45fd924fcdee01fa06e635812",
"sha256:6c32cc3145928c4305d142ebec682419a6c0a8ce9e33db900027ddca1ec39178",
"sha256:8bb1e155a74e1bfbacd84555ea62fa21c58e0b4e7e6b20e4447b8d07990ac78b",
"sha256:95d5ef984eff897850f3a83883363da64aae1000e79cb3c321915468e8c6add5",
"sha256:a013cbe25d20c2e0c4e85a9daf438f85121a4d0344ddc76e33fd7e3965d9af4b",
"sha256:a787ab10d7bb5494e5f76536ac460741788f1fbce851068d73a87ca7c35fc3e1",
"sha256:a7d5e9fad90eff8f6f6106d3b98b553a88b6f976e51fce287192a5d2d5363713",
"sha256:aac00e4bc94d1b7813fe882c28990c1bc2f9d0e1aa765a5f2b516e8a6a16a9e4",
"sha256:b91c36492a4bbb1ee855b7d16fe51379e5f96b85692dc8210831fbb24c43e484",
"sha256:c03c07ed32c5324939b19e36ae5f75c660c81461e312a41aea30acdd46f93a7c",
"sha256:c5236606e8570542ed424849f7852a0ff0bce2c4c8d0ba05cc202a5a9c97dee9",
"sha256:c6b39294464b03457f9064e98c124e09008b35a62e3189d3513e5148611c9388",
"sha256:cb7a09e173903541fa888ba010c345893cd9fc1b5891aaf060f6ca77b6a3722d",
"sha256:d68cb92c408261f806b15923834203f024110a2e2872ecb0bd2a110f89d3c602",
"sha256:dc38f57d8f20f06dd7c3161c59ca2c86893632623f33a42d592f097b00f720a9",
"sha256:e98eca29a05913e82177b3ba3d198b1728e164869c613d76d0de4bde6768a50e",
"sha256:f217c3954ce5fd88303fc0c317af55d5e0204106d86dea17eb8205700d47dec2"
"sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76",
"sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585",
"sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b",
"sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8",
"sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55",
"sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc",
"sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645",
"sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff",
"sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc",
"sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b",
"sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6",
"sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20",
"sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e",
"sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a",
"sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779",
"sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02",
"sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39",
"sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f",
"sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a",
"sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409",
"sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c",
"sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488",
"sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b",
"sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d",
"sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09",
"sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b",
"sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153",
"sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9",
"sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad",
"sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df",
"sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df",
"sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed",
"sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed",
"sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698",
"sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29",
"sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649",
"sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49",
"sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b",
"sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2",
"sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a",
"sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"
],
"markers": "python_version >= '3.6'",
"version": "==8.2.0"
"version": "==8.4.0"
},
"protobuf": {
"hashes": [
"sha256:038daf4fa38a7e818dd61f51f22588d61755160a98db087a046f80d66b855942",
"sha256:28ccea56d4dc38d35cd70c43c2da2f40ac0be0a355ef882242e8586c6d66666f",
"sha256:36d90676d6f426718463fe382ec6274909337ca6319d375eebd2044e6c6ac560",
"sha256:3cd0458870ea7d1c58e948ac8078f6ba8a7ecc44a57e03032ed066c5bb318089",
"sha256:5935c8ce02e3d89c7900140a8a42b35bc037ec07a6aeb61cc108be8d3c9438a6",
"sha256:615b426a177780ce381ecd212edc1e0f70db8557ed72560b82096bd36b01bc04",
"sha256:62a8e4baa9cb9e064eb62d1002eca820857ab2138440cb4b3ea4243830f94ca7",
"sha256:655264ed0d0efe47a523e2255fc1106a22f6faab7cc46cfe99b5bae085c2a13e",
"sha256:6e8ea9173403219239cdfd8d946ed101f2ab6ecc025b0fda0c6c713c35c9981d",
"sha256:71b0250b0cfb738442d60cab68abc166de43411f2a4f791d31378590bfb71bd7",
"sha256:74f33edeb4f3b7ed13d567881da8e5a92a72b36495d57d696c2ea1ae0cfee80c",
"sha256:77d2fadcf369b3f22859ab25bd12bb8e98fb11e05d9ff9b7cd45b711c719c002",
"sha256:8b30a7de128c46b5ecb343917d9fa737612a6e8280f440874e5cc2ba0d79b8f6",
"sha256:8e51561d72efd5bd5c91490af1f13e32bcba8dab4643761eb7de3ce18e64a853",
"sha256:a529e7df52204565bcd33738a7a5f288f3d2d37d86caa5d78c458fa5fabbd54d",
"sha256:b691d996c6d0984947c4cf8b7ae2fe372d99b32821d0584f0b90277aa36982d3",
"sha256:d80f80eb175bf5f1169139c2e0c5ada98b1c098e2b3c3736667f28cbbea39fc8",
"sha256:d83e1ef8cb74009bebee3e61cc84b1c9cd04935b72bca0cbc83217d140424995",
"sha256:d8919368410110633717c406ab5c97e8df5ce93020cfcf3012834f28b1fab1ea",
"sha256:db3532d9f7a6ebbe2392041350437953b6d7a792de10e629c1e4f5a6b1fe1ac6",
"sha256:e7b24c11df36ee8e0c085e5b0dc560289e4b58804746fb487287dda51410f1e2",
"sha256:e7e8d2c20921f8da0dea277dfefc6abac05903ceac8e72839b2da519db69206b",
"sha256:e813b1c9006b6399308e917ac5d298f345d95bb31f46f02b60cd92970a9afa17",
"sha256:fd390367fc211cc0ffcf3a9e149dfeca78fecc62adb911371db0cec5c8b7472d"
],
"markers": "python_version >= '3.5'",
"version": "==3.19.1"
},
"pyaudio": {
"hashes": [
"sha256:0d92f6a294565260a282f7c9a0b0d309fc8cc988b5ee5b50645634ab9e2da7f7",
"sha256:259bb9c1363be895b4f9a97e320a6017dd06bc540728c1a04eb4a7b6fe75035b",
"sha256:2a19bdb8ec1445b4f3e4b7b109e0e4cec1fd1f1ce588592aeb6db0b58d4fb3b0",
"sha256:51b558d1b28c68437b53218279110db44f69f3f5dd3d81859f569a4a96962bdc",
"sha256:589bfad2c615dd4b5d3757e763019c42ab82f06fba5cae64ec02fd7f5ae407ed",
"sha256:8f89075b4844ea94dde0c951c2937581c989fabd4df09bfd3f075035f50955df",
"sha256:93bfde30e0b64e63a46f2fd77e85c41fd51182a4a3413d9edfaf9ffaa26efb74",
"sha256:cf1543ba50bd44ac0d0ab5c035bb9c3127eb76047ff12235149d9adf86f532b6",
"sha256:f78d543a98b730e64621ebf7f3e2868a79ade0a373882ef51c0293455ffa8e6e"
],
"index": "pypi",
"version": "==0.2.11"
},
"pyparsing": {
"hashes": [
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
"sha256:4881e3d2979f27b41a3a2421b10be9cbfa7ce2baa6c7117952222f8bbea6650c",
"sha256:9329d1c1b51f0f76371c4ded42c5ec4cc0be18456b22193e0570c2da98ed288b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
"markers": "python_version >= '3.6'",
"version": "==3.0.5"
},
"python-dateutil": {
"hashes": [
"sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
"sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
"sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.8.1"
},
"pywavelets": {
"hashes": [
"sha256:076ca8907001fdfe4205484f719d12b4a0262dfe6652fa1cfc3c5c362d14dc84",
"sha256:18a51b3f9416a2ae6e9a35c4af32cf520dd7895f2b69714f4aa2f4342fca47f9",
"sha256:1a64b40f6acb4ffbaccce0545d7fc641744f95351f62e4c6aaa40549326008c9",
"sha256:2b634a54241c190ee989a4af87669d377b37c91bcc9cf0efe33c10ff847f7841",
"sha256:2f7429eeb5bf9c7068002d0d7f094ed654c77a70ce5e6198737fd68ab85f8311",
"sha256:35959c041ec014648575085a97b498eafbbaa824f86f6e4a59bfdef8a3fe6308",
"sha256:39c74740718e420d38c78ca4498568fa57976d78d5096277358e0fa9629a7aea",
"sha256:411e17ca6ed8cf5e18a7ca5ee06a91c25800cc6c58c77986202abf98d749273a",
"sha256:55e39ec848ceec13c9fa1598253ae9dd5c31d09dfd48059462860d2b908fb224",
"sha256:6162dc0ae04669ea04b4b51420777b9ea2d30b0a9d02901b2a3b4d61d159c2e9",
"sha256:68b5c33741d26c827074b3d8f0251de1c3019bb9567b8d303eb093c822ce28f1",
"sha256:6bc78fb9c42a716309b4ace56f51965d8b5662c3ba19d4591749f31773db1125",
"sha256:6ebfefebb5c6494a3af41ad8c60248a95da267a24b79ed143723d4502b1fe4d7",
"sha256:720dbcdd3d91c6dfead79c80bf8b00a1d8aa4e5d551dc528c6d5151e4efc3403",
"sha256:732bab78435c48be5d6bc75486ef629d7c8f112e07b313bf1f1a2220ab437277",
"sha256:7947e51ca05489b85928af52a34fe67022ab5b81d4ae32a4109a99e883a0635e",
"sha256:79f5b54f9dc353e5ee47f0c3f02bebd2c899d49780633aa771fed43fa20b3149",
"sha256:80b924edbc012ded8aa8b91cb2fd6207fb1a9a3a377beb4049b8a07445cec6f0",
"sha256:83c5e3eb78ce111c2f0b45f46106cc697c3cb6c4e5f51308e1f81b512c70c8fb",
"sha256:889d4c5c5205a9c90118c1980df526857929841df33e4cd1ff1eff77c6817a65",
"sha256:935ff247b8b78bdf77647fee962b1cc208c51a7b229db30b9ba5f6da3e675178",
"sha256:98b2669c5af842a70cfab33a7043fcb5e7535a690a00cd251b44c9be0be418e5",
"sha256:9e2528823ccf5a0a1d23262dfefe5034dce89cd84e4e124dc553dfcdf63ebb92",
"sha256:bc5e87b72371da87c9bebc68e54882aada9c3114e640de180f62d5da95749cd3",
"sha256:be105382961745f88d8196bba5a69ee2c4455d87ad2a2e5d1eed6bd7fda4d3fd",
"sha256:c06d2e340c7bf8b9ec71da2284beab8519a3908eab031f4ea126e8ccfc3fd567",
"sha256:c2a799e79cee81a862216c47e5623c97b95f1abee8dd1f9eed736df23fb653fb",
"sha256:cfe79844526dd92e3ecc9490b5031fca5f8ab607e1e858feba232b1b788ff0ea",
"sha256:d510aef84d9852653d079c84f2f81a82d5d09815e625f35c95714e7364570ad4",
"sha256:e02a0558e0c2ac8b8bbe6a6ac18c136767ec56b96a321e0dfde2173adfa5a504"
],
"markers": "python_version >= '3.5'",
"version": "==1.1.1"
},
"scikit-image": {
"hashes": [
"sha256:1256017c513e8e1b8b9da73e5fd1e605d0077bbbc8e5c8d6c2cab36400131c6c",
"sha256:1cd05c882ffb2a271a1f20b4afe937d63d55b8753c3d652f11495883a7800ebe",
"sha256:23f9178b21c752bfb4e4ea3a3fa0ff79bc5a401bc75ddb4661f2cebd1c2b0e24",
"sha256:2c058770c6ad6e0fe6c30f59970c9c65fa740ff014d121d8c341664cd792cf49",
"sha256:2eea42706a25ae6e0cebaf1914e2ab1c04061b1f3c9966d76025d58a2e9188fc",
"sha256:30447af3f5b7c9491f2d3db5bc275493d1b91bf1dd16b67e2fd79a6bb95d8ee9",
"sha256:3515b890e771f99bbe1051a0dcfe0fc477da961da933c34f89808a0f1eeb7dc2",
"sha256:5f602779258807d03e72c0a439cfb221f647e628be166fb3594397435f13c76b",
"sha256:76446e2402e64d7dba78eeae8aa86e92a0cafe5b1c9e6235bd8d067471ed2788",
"sha256:ae6659b3a8bd4bba7e9dcbfd0064e443b32c7054bf09174749db896730fcf42e",
"sha256:c700336a7f96109c74154090c5e693693a8e3fa09ed6156a5996cdc9a3bb1534",
"sha256:d5ad4a9b4c9797d4c4c48f45fa224c5ebff22b9b0af636c3ecb8addbb66c21e6",
"sha256:d746540cafe7776c6d05a0b40ec744bb8d33d1ddc51faba601d26c02593d8bcc",
"sha256:e972c628ad9ba52c298b032368e29af9bd5eeb81ce33bc2d9b039a81661c99c5",
"sha256:ec25e4110951d3a280421bb10dd510a082ba83d86e20d706294faf7899cdb3d5",
"sha256:fbb618ca911867bce45574c1639618cdfb5d94e207432b19bc19563d80d2f171"
],
"markers": "python_version >= '3.7'",
"version": "==0.18.1"
"version": "==2.8.2"
},
"scipy": {
"hashes": [
"sha256:03f1fd3574d544456325dae502facdf5c9f81cbfe12808a5e67a737613b7ba8c",
"sha256:0c81ea1a95b4c9e0a8424cf9484b7b8fa7ef57169d7bcc0dfcfc23e3d7c81a12",
"sha256:1fba8a214c89b995e3721670e66f7053da82e7e5d0fe6b31d8e4b19922a9315e",
"sha256:37f4c2fb904c0ba54163e03993ce3544c9c5cde104bcf90614f17d85bdfbb431",
"sha256:50e5bcd9d45262725e652611bb104ac0919fd25ecb78c22f5282afabd0b2e189",
"sha256:6ca1058cb5bd45388041a7c3c11c4b2bd58867ac9db71db912501df77be2c4a4",
"sha256:77f7a057724545b7e097bfdca5c6006bed8580768cd6621bb1330aedf49afba5",
"sha256:816951e73d253a41fa2fd5f956f8e8d9ac94148a9a2039e7db56994520582bf2",
"sha256:96620240b393d155097618bcd6935d7578e85959e55e3105490bbbf2f594c7ad",
"sha256:993c86513272bc84c451349b10ee4376652ab21f312b0554fdee831d593b6c02",
"sha256:adf7cee8e5c92b05f2252af498f77c7214a2296d009fc5478fc432c2f8fb953b",
"sha256:bc52d4d70863141bb7e2f8fd4d98e41d77375606cde50af65f1243ce2d7853e8",
"sha256:c1d3f771c19af00e1a36f749bd0a0690cc64632783383bc68f77587358feb5a4",
"sha256:d744657c27c128e357de2f0fd532c09c84cd6e4933e8232895a872e67059ac37",
"sha256:e3e9742bad925c421d39e699daa8d396c57535582cba90017d17f926b61c1552",
"sha256:e547f84cd52343ac2d56df0ab08d3e9cc202338e7d09fafe286d6c069ddacb31",
"sha256:e89091e6a8e211269e23f049473b2fde0c0e5ae0dd5bd276c3fc91b97da83480",
"sha256:e9da33e21c9bc1b92c20b5328adb13e5f193b924c9b969cd700c8908f315aa59",
"sha256:ffdfb09315896c6e9ac739bb6e13a19255b698c24e6b28314426fd40a1180822"
"sha256:1437073f1d4664990879aa8f9547524764372e0fef84a077be4b19e82bba7a8d",
"sha256:17fd991a275e4283453f89d404209aa92059ac68d76d804b4bc1716a3742e1b5",
"sha256:1ea6233f5a365cb7945b4304bd06323ece3ece85d6a3fa8598d2f53e513467c9",
"sha256:2d25272c03ee3c0fe5e0dff1bb7889280bb6c9e1766fa9c7bde81ad8a5f78694",
"sha256:30bdda199667e74b50208a793eb1ba47a04e5e3fa16f5ff06c6f7969ae78e4da",
"sha256:359b60a0cccd17723b9d5e329a5212a710e771a3ddde800e472fb93732756c46",
"sha256:39f838ea5ce8da868785193d88d05cf5a6d5c390804ec99de29a28e1dcdd53e6",
"sha256:4d175ba93e00d8eef8f7cd70d4d88a9106a86800c82ea03cf2268c36d6545483",
"sha256:5273d832fb9cd5724ee0d335c16a903b923441107dd973d27fc4293075a9f4e3",
"sha256:54951f51d731c832b1b8885e0a92e89f33d087de7e40d02078bf0d49c7cbdbb5",
"sha256:74f518ce542533054695f743e4271cb8986b63f95bb51d70fcee4f3929cbff7d",
"sha256:7b1d0f5f524518f1a86f288443528e4ff4a739c0966db663af4129b7ac7849f8",
"sha256:82c5befebf54d799d77e5f0205c03030f57f69ba2541baa44d2e6ad138c28cd3",
"sha256:8482c8e45857ab0a5446eb7460d2307a27cbbe659d6d2257820c6d6eb950fd0f",
"sha256:87cf3964db0f1cce17aeed5bfc1b89a6b4b07dbfc48e50d21fa3549e00456803",
"sha256:8b5726a0fedeaa6beb1095e4466998bdd1d1e960b28db9b5a16c89cbd7b2ebf1",
"sha256:97eb573e361a73a553b915dc195c6f72a08249964b1a33f157f9659f3b6210d1",
"sha256:a80eb01c43fd98257ec7a49ff5cec0edba32031b5f86503f55399a48cb2c5379",
"sha256:cac71d5476a6f56b50459da21f6221707e0051ebd428b2137db32ef4a43bb15e",
"sha256:d86abd1ddf421dea5e9cebfeb4de0d205b3dc04e78249afedba9c6c3b2227ff2",
"sha256:dc2d1bf41294e63c7302bf499973ac0c7f73c93c01763db43055f6525234bf11",
"sha256:e08b81fcd9bf98740b58dc6fdd7879e33a64dcb682201c1135f7d4a75216bb05",
"sha256:e3efe7ef75dfe627b354ab0af0dbc918eadee97cc80ff1aabea6d3e01114ebdd",
"sha256:fa2dbabaaecdb502641b0b3c00dec05fb475ae48655c66da16c9ed24eda1e711"
],
"markers": "python_version < '3.10' and python_version >= '3.7'",
"version": "==1.6.2"
"index": "pypi",
"version": "==1.7.2"
},
"six": {
"hashes": [
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.15.0"
"version": "==1.16.0"
},
"tifffile": {
"toml": {
"hashes": [
"sha256:1cfc55f5b728e200142580a7bf108b72775c4097d007b4111876559fa1fb7432",
"sha256:55aa8baad38e1567c9fe450fff52160e4a21294a612f241c5e414da80f87209b"
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
"sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
],
"markers": "python_version >= '3.7'",
"version": "==2021.4.8"
"index": "pypi",
"version": "==0.10.2"
},
"torch": {
"websockets": {
"hashes": [
"sha256:1388b30fbd262c1a053d6c9ace73bb0bd8f5871b4892b6f3e02d1d7bc9768563",
"sha256:16f2630d9604c4ee28ea7d6e388e2264cd7bc6031c6ecd796bae3f56b5efa9a3",
"sha256:225ee4238c019b28369c71977327deeeb2bd1c6b8557e6fcf631b8866bdc5447",
"sha256:3e4190c04dfd89c59bad06d5fe451446643a65e6d2607cc989eb1001ee76e12f",
"sha256:4ace9c5bb94d5a7b9582cd089993201658466e9c59ff88bd4e9e08f6f072d1cf",
"sha256:55137feb2f5a0dc7aced5bba690dcdb7652054ad3452b09a2bbb59f02a11e9ff",
"sha256:5c2e9a33d44cdb93ebd739b127ffd7da786bf5f740539539195195b186a05f6c",
"sha256:6ffa1e7ae079c7cb828712cb0cdaae5cc4fb87c16a607e6d14526b62c20bcc17",
"sha256:8ad2252bf09833dcf46a536a78544e349b8256a370e03a98627ebfb118d9555b",
"sha256:95b7bbbacc3f28fe438f418392ceeae146a01adc03b29d44917d55214ac234c9",
"sha256:a50ea8ed900927fb30cadb63aa7a32fdd59c7d7abe5012348dfbe35a8355c083",
"sha256:c6ede2ae4dcd8214b63e047efabafa92493605205a947574cf358216ca4e440a",
"sha256:ce7d435426f3dd14f95710d779aa46e9cd5e077d512488e813f7589fdc024f78",
"sha256:dac4d10494e74f7e553c92d7263e19ea501742c4825ddd26c4decfa27be95981",
"sha256:e7ad1649adb7dc2a450e70a3e51240b84fa4746c69c8f98989ce0c254f9fba3a",
"sha256:f23eeb1a48cc39209d986c418ad7e02227eee973da45c0c42d36b1aec72f4940"
"sha256:01db0ecd1a0ca6702d02a5ed40413e18b7d22f94afb3bbe0d323bac86c42c1c8",
"sha256:085bb8a6e780d30eaa1ba48ac7f3a6707f925edea787cfb761ce5a39e77ac09b",
"sha256:1ac35426fe3e7d3d0fac3d63c8965c76ed67a8fd713937be072bf0ce22808539",
"sha256:1f6b814cff6aadc4288297cb3a248614829c6e4ff5556593c44a115e9dd49939",
"sha256:2a43072e434c041a99f2e1eb9b692df0232a38c37c61d00e9f24db79474329e4",
"sha256:5b2600e01c7ca6f840c42c747ffbe0254f319594ed108db847eb3d75f4aacb80",
"sha256:62160772314920397f9d219147f958b33fa27a12c662d4455c9ccbba9a07e474",
"sha256:706e200fc7f03bed99ad0574cd1ea8b0951477dd18cc978ccb190683c69dba76",
"sha256:71358c7816e2762f3e4af3adf0040f268e219f5a38cb3487a9d0fc2e554fef6a",
"sha256:7d2e12e4f901f1bc062dfdf91831712c4106ed18a9a4cdb65e2e5f502124ca37",
"sha256:7f79f02c7f9a8320aff7d3321cd1c7e3a7dbc15d922ac996cca827301ee75238",
"sha256:82b17524b1ce6ae7f7dd93e4d18e9b9474071e28b65dbf1dfe9b5767778db379",
"sha256:82bd921885231f4a30d9bc550552495b3fc36b1235add6d374e7c65c3babd805",
"sha256:8bbf8660c3f833ddc8b1afab90213f2e672a9ddac6eecb3cde968e6b2807c1c7",
"sha256:9a4d889162bd48588e80950e07fa5e039eee9deb76a58092e8c3ece96d7ef537",
"sha256:b4ade7569b6fd17912452f9c3757d96f8e4044016b6d22b3b8391e641ca50456",
"sha256:b8176deb6be540a46695960a765a77c28ac8b2e3ef2ec95d50a4f5df901edb1c",
"sha256:c4fc9a1d242317892590abe5b61a9127f1a61740477bfb121743f290b8054002",
"sha256:c5880442f5fc268f1ef6d37b2c152c114deccca73f48e3a8c48004d2f16f4567",
"sha256:cd8c6f2ec24aedace251017bc7a414525171d4e6578f914acab9349362def4da",
"sha256:d67646ddd17a86117ae21c27005d83c1895c0cef5d7be548b7549646372f868a",
"sha256:e42a1f1e03437b017af341e9bbfdc09252cd48ef32a8c3c3ead769eab3b17368",
"sha256:eb282127e9c136f860c6068a4fba5756eb25e755baffb5940b6f1eae071928b2",
"sha256:fe83b3ec9ef34063d86dfe1029160a85f24a5a94271036e5714a57acfdd089a1",
"sha256:ff59c6bdb87b31f7e2d596f09353d5a38c8c8ff571b0e2238e8ee2d55ad68465"
],
"markers": "python_full_version >= '3.6.2'",
"version": "==1.8.1"
"index": "pypi",
"version": "==10.0"
},
"tqdm": {
"wheel": {
"hashes": [
"sha256:daec693491c52e9498632dfbe9ccfc4882a557f5fa08982db1b4d3adbe0887c3",
"sha256:ebdebdb95e3477ceea267decfc0784859aa3df3e27e22d23b83e9b272bf157ae"
"sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd",
"sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==4.60.0"
},
"typing-extensions": {
"hashes": [
"sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918",
"sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c",
"sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"
],
"version": "==3.7.4.3"
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.37.0"
}
},
"develop": {}

@ -1 +0,0 @@
Subproject commit c46d5b559b41aeaccad5b3ab60ac004b2f6d30d3

@ -1 +0,0 @@
Subproject commit e09639b267c1ab61ac9130afda9cf2f82d7dc6fc

527
main.py
View File

@ -1,508 +1,47 @@
import socket
from threading import Thread
from queue import Queue, Full as FullException, Empty as EmptyException
from enum import Enum, auto
import math
import logging
import sys
from multiprocessing.connection import wait
import face_alignment
import face_alignment.detection.blazeface
import cv2
import numpy as np
from numba import jit
from multiprocessing_logging import install_mp_handler
import toml
import click
from contrib.head_pose_estimation.pose_estimator import PoseEstimator
# from contrib.GazeTracking.gaze_tracking.eye import Eye
# from contrib.GazeTracking.gaze_tracking.calibration import Calibration
from ovtk_track.pipeline import Pipeline
# class GazeTracker:
# def __init__(self, margin):
# self.calibration = Calibration()
# self.margin = margin
#
# def get_gaze(self, frame, landmarks):
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# eye_left = Eye(frame, landmarks, self.margin, 0, self.calibration)
# eye_right = Eye(frame, landmarks, self.margin, 1, self.calibration)
# try:
# left_horz_ratio = eye_left.pupil.x / (eye_left.center[0] * 2 - (self.margin//2))
# left_vert_ratio = eye_left.pupil.y / (eye_left.center[1] * 2 - (self.margin//2))
# right_horz_ratio = eye_right.pupil.x / (eye_right.center[0] * 2 - (self.margin//2))
# right_vert_ratio = eye_right.pupil.y / (eye_right.center[1] * 2 - (self.margin//2))
# except TypeError:
# return None
#
# horz_average = (left_horz_ratio + right_horz_ratio) / 2
# vert_average = (left_vert_ratio + right_vert_ratio) / 2
#
# return (eye_left.origin[0] + eye_left.pupil.x, eye_left.origin[1] + eye_left.pupil.y, eye_left.blinking), \
# (eye_right.origin[0] + eye_right.pupil.x, eye_right.origin[1] + eye_right.pupil.y, eye_right.blinking), \
# horz_average, vert_average
class Eye:
    """Single-eye crop of a face frame, with a blink ratio and a (currently
    disabled) pupil locator.

    Built from a full-face frame plus 68-point facial landmarks: crops the eye
    region (padded by ``margin``), computes a width/height blink ratio, and
    carries the machinery for Fabian Timm gradient pupil localization.
    """

    class SIDE(Enum):
        LEFT = auto()
        RIGHT = auto()

    # 68-point landmark model: points 36-41 are the left eye, 42-47 the right.
    _SIDE_SLICES = {
        SIDE.LEFT: slice(36, 42),
        SIDE.RIGHT: slice(42, 48),
    }

    def __init__(self, frame, landmarks, margin, side):
        self._margin = margin
        self._max_iterations = 100   # cap on gradient-ascent steps in pupil search
        self._ascent_distance = 8    # neighborhood radius per ascent step

        self._pupil_x = None
        self._pupil_y = None

        landmarks_for_eye = landmarks[Eye._SIDE_SLICES[side]].astype(np.int32)

        self._frame, (self.min_x, self.min_y, self.width, self.height) = self._crop(frame, landmarks_for_eye)

        self.blinking = self._get_blink_ratio(landmarks_for_eye)
        if self.blinking is not None and self.blinking < 1:
            pass
            # pupil = self._find_pupils(self._frame)
            # if pupil is not None:
            #     pupil_x, pupil_y = pupil
            #     self._pupil_x = round(pupil_x)
            #     self._pupil_y = round(pupil_y)

    @property
    def frame(self):
        # Cropped (eye-region) frame.
        return self._frame

    @property
    def pupil(self):
        # Pupil position relative to the crop; (None, None) while disabled.
        return self._pupil_x, self._pupil_y

    @property
    def origin(self):
        # Top-left corner of the crop in full-frame coordinates.
        return self.min_x, self.min_y

    @property
    def center(self):
        # Center of the crop in full-frame coordinates.
        return self.min_x + (self.width // 2), self.min_y + (self.height // 2)

    def _crop(self, frame, landmarks):
        """Crop frame to the landmark bounding box (+margin); return (crop, (x, y, w, h))."""
        # FIXME: My spidy sense says this is a poor way to do this, but rn brain no worky and code do
        # BUG FIX: clamp the minimums to 0 — a margin that pushes past the frame
        # edge would otherwise produce negative slice indices, which silently
        # wrap around and return a garbage (or empty) crop.  Over-large maximums
        # are already safe: numpy clips slice stops to the array bounds.
        min_x = max(np.min(landmarks[:, 0]) - self._margin, 0)
        max_x = np.max(landmarks[:, 0]) + self._margin
        min_y = max(np.min(landmarks[:, 1]) - self._margin, 0)
        max_y = np.max(landmarks[:, 1]) + self._margin

        cropped = frame[min_y:max_y, min_x:max_x]
        height, width = cropped.shape[:2]
        return cropped, (min_x, min_y, width, height)

    def _get_blink_ratio(self, landmarks):
        """Return the eye width/height ratio (larger = more closed), or None on zero height."""
        left = landmarks[0]
        right = landmarks[3]
        top = np.mean(landmarks[1:3], axis=0)
        bottom = np.mean(landmarks[4:6], axis=0)

        eye_width = math.hypot((left[0] - right[0]), (left[1] - right[1]))
        eye_height = math.hypot((top[0] - bottom[0]), (top[1] - bottom[1]))

        try:
            return eye_width / eye_height
        except ZeroDivisionError:
            # Degenerate geometry (top and bottom lids coincide).
            return None

    # MAGIC: Get pupils via Fabian Timm gradient localization
    # See https://www.inb.uni-luebeck.de/fileadmin/files/PUBPDFS/TiBa11b.pdf
    # and https://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/
    #
    # Mostly copied from from https://github.com/Kryolyz/Pupil_Tracker/blob/master/Pupil_Tracker.py
    @staticmethod
    @jit(nopython=True, cache=True, fastmath=True)
    def _gradx(pic, grad, mean, std):
        # Fills `grad` in place with the sign of the thresholded horizontal
        # gradient of `pic` (central differences); values within 0.3*mean+-std
        # of zero are suppressed.
        if len(grad[:, :]) > 0:
            for y in range(len(grad[:, 0])):
                for x in range(len(grad[0, :]) - 2):
                    grad[y, x+1] = (float(pic[y, x+2]) - float(pic[y, x])) / 2.0
            mean = np.mean(grad)
            std = np.std(grad)
            for y in range(len(grad[:, 0])):
                for x in range(len(grad[0, :])):
                    if grad[y, x] < 0.3 * mean + std and grad[y, x] > - 0.3 * mean - std:
                        grad[y, x] = 0
                    if grad[y, x] > 0:
                        grad[y, x] = 1
                    elif grad[y, x] < 0:
                        grad[y, x] = -1
        return grad

    @staticmethod
    @jit(nopython=True, cache=True, fastmath=True)
    def _evaluate(x, y, gradix, gradiy, func):
        # Scores candidate center (x, y): mean of the (clamped-positive) dot
        # products between displacement unit vectors and the image gradients.
        if len(gradix[:, :]) > 0:
            for cy in range(len(gradix[:, 0])):
                for cx in range(len(gradix[0, :])):
                    if y != cy and x != cx:
                        dy = float(cy - y)
                        dx = float(cx - x)
                        norm = np.linalg.norm(np.array([dx, dy]))
                        dy = dy/norm
                        dx = dx/norm
                        func[cy, cx] = gradiy[cy, cx] * dy + gradix[cy, cx] * dx
                        if func[cy, cx] < 0:
                            func[cy, cx] = 0
        return np.mean(func)

    def _find_pupils(self, frame):
        """Hill-climb from the crop center to the best-scoring pupil candidate; return (x, y)."""
        mean = 0
        std = 0
        gradix = np.zeros_like(frame, dtype=float)
        self._gradx(frame, gradix, mean, std)
        # Vertical gradient: reuse _gradx on the transposed image.
        gradiy = np.zeros_like(np.transpose(frame), dtype=float)
        self._gradx(np.transpose(frame), gradiy, mean, std)
        gradiy = np.transpose(gradiy)

        func = np.zeros_like(frame, dtype=float)
        means = np.zeros_like(frame, dtype=float)

        y = int(self.height / 2)
        x = int(self.width / 2)
        iterations = 0
        while iterations < self._max_iterations:
            ymin = max(y-self._ascent_distance, 0)
            ymax = min(y+self._ascent_distance, self.height)
            xmin = max(x-self._ascent_distance, 0)
            xmax = min(x+self._ascent_distance, self.width)

            should_continue = 0
            for i in np.arange(ymin, ymax):
                for j in np.arange(xmin, xmax):
                    # Lazily score each candidate once (dark pixels weighted up).
                    if means[i, j] < 10:
                        means[i, j] = (255 - frame[i, j]) * self._evaluate(j, i, gradix, gradiy, func)
                    if means[i, j] > means[y, x]:
                        should_continue = 1
                        y = i
                        x = j
            iterations += 1
            if should_continue == 0:
                # Local maximum reached.
                break
        return x, y


logLevels = ['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']


@click.command()
@click.argument('config_toml', type=click.File('r'), default='config.toml')
@click.option('--verbose', is_flag=True)
@click.option('--log', 'log_settings', nargs=2, multiple=True, type=(str, click.Choice(logLevels)),
              help='When verbose is too verbose, use this to directly set log levels of modules.')
def main(config_toml, verbose=False, log_settings=()):
    """Load pipeline definitions from a TOML config, start them all, and block
    until any pipeline process exits.

    config_toml:  readable config file (defaults to ./config.toml).
    verbose:      raise the root log level to DEBUG.
    log_settings: (module, level) pairs to set individual logger levels.
    """
    # BUG FIX: the --verbose flag was declared but never used; wire it up.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG if verbose else logging.WARNING)
    # These libraries are extremely chatty at INFO/DEBUG.
    logging.getLogger('matplotlib').setLevel(logging.WARNING)
    logging.getLogger('numba').setLevel(logging.WARNING)
    for module, level in log_settings:
        # BUG FIX: was `logging[level]` — the logging *module* is not
        # subscriptable; resolve the level name ('DEBUG', ...) to its constant.
        logging.getLogger(module).setLevel(getattr(logging, level))

    install_mp_handler()

    config = toml.load(config_toml)

    pipelines = []
    for pipeline_conf in config['pipeline']:
        pipeline = Pipeline(**pipeline_conf)
        pipelines.append(pipeline)
        pipeline.start()

    # Block until any pipeline's process sentinel becomes ready (i.e. exits).
    wait([pipeline.sentinel for pipeline in pipelines])
class GazeTracker:
    """Per-eye state estimator for a face in a frame.

    Currently only blink ratios are produced; the pupil/gaze-ratio path is
    disabled inside Eye (see the commented-out code there).
    """

    def __init__(self, margin):
        # Pixel padding applied around each eye's landmark bounding box.
        self.margin = margin

    def get_gaze(self, frame, landmarks):
        """Return (left_blink_ratio, right_blink_ratio) for the given frame.

        frame:     BGR image containing the face.
        landmarks: 68-point facial landmark array.
        Either ratio may be None when the eye geometry is degenerate.
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        left, right = (Eye(gray, landmarks, self.margin, side)
                       for side in (Eye.SIDE.LEFT, Eye.SIDE.RIGHT))
        return left.blinking, right.blinking
class FaceDetector(Thread):
    """Worker thread running the (slow) ML face pipeline.

    Pulls raw frames from frame_queue, detects the first face and its 2D
    landmarks on CPU, and pushes (landmarks, frame) pairs onto track_queue,
    waiting for each result to be consumed before analyzing the next frame.
    """

    def __init__(self, frame_queue, track_queue):
        super().__init__()
        self.alive = True  # clear to stop the run() loop
        self._frame_queue = frame_queue
        self._track_queue = track_queue
        self._face_aligner = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu')
        self._face_detector = face_alignment.detection.blazeface.FaceDetector(device='cpu')

    def find_face(self, frame):
        """Return the first detected face box as an int array, or None if no face."""
        detections = self._face_detector.detect_from_image(frame)
        if len(detections) == 0:
            return None
        return detections[0].astype(int)

    def get_landmarks(self, frame, face_box):
        """Return the landmark set for face_box within frame, or None if alignment found none."""
        landmark_sets = self._face_aligner.get_landmarks_from_image(frame, detected_faces=[face_box])
        if len(landmark_sets) == 0:
            return None
        return landmark_sets[0]

    def run(self):
        while self.alive:
            frame = self._frame_queue.get()

            box = self.find_face(frame)
            if box is None:
                continue

            points = self.get_landmarks(frame, box)
            if points is None:
                continue

            self._track_queue.put((points, frame))
            # Back-pressure: don't start on another frame until the consumer
            # (Interpolator) has marked this result done.
            self._track_queue.join()
class Interpolator(Thread):
    """Thread that upsamples sparse ML landmark results to full frame rate.

    Consumes (landmarks, frame) pairs from the detector thread and, using the
    raw frames that arrived in between, emits interpolated landmarks for each
    buffered frame by blending Lucas-Kanade optical-flow tracking with linear
    interpolation between consecutive exact results.
    """

    def __init__(self, frame_queue, track_queue, output_queue):
        # frame_queue:  raw camera frames (drained each cycle)
        # track_queue:  (landmarks, frame) results from the ML detector thread
        # output_queue: (frame, landmarks, is_interpolated) tuples for consumers
        super().__init__()
        self._frame_queue = frame_queue
        self._track_queue = track_queue
        self._output_queue = output_queue

        # Lucas-Kanade sparse optical flow paramaters
        # TODO: learn what these should do and make them easier to configure
        self._lk_params = dict(winSize=(8, 8),
                               maxLevel=2,
                               criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Per-point LK tracking error above which we fall back to linear interp.
        self._lk_error_thresh = 10

    def run(self):
        landmarks_from = None
        landmarks_from_frame = None
        frames_to_interpolate = None
        while True:
            # Get landmarked frame from the ML thread (blocks until available)
            landmarks_to, landmarks_to_frame = self._track_queue.get()

            # Empty the frame queue — these frames arrived while the ML thread
            # was busy and will be interpolated over on the NEXT cycle.
            new_frames = list()
            while not self._frame_queue.empty():
                try:
                    new_frame_raw = self._frame_queue.get_nowait()
                    new_frames.append(new_frame_raw)
                except EmptyException:
                    # Race between empty() and get_nowait(); just stop draining.
                    break

            # Let the ML thread know its time to analyze a new frame
            self._track_queue.task_done()

            if landmarks_from is not None:
                # Emit the previous exact result first (is_interpolated=False) ...
                self._output_queue.put((landmarks_from_frame, landmarks_from, False))

                # ... then interpolate the frames buffered since that result.
                for frame, interpolated_landmarks in self.interpolate(landmarks_from, landmarks_from_frame, landmarks_to, frames_to_interpolate):
                    try:
                        self._output_queue.put_nowait((frame, interpolated_landmarks, True))
                    except FullException:
                        # Consumer is behind; drop the remaining interpolations.
                        break

            # Save the current landmarks for use in the next loop
            landmarks_from = landmarks_to
            landmarks_from_frame = landmarks_to_frame
            frames_to_interpolate = new_frames

    # Interpolate frames using a mixture of methods
    def interpolate(self, landmarks_from, landmarks_from_frame, landmarks_to, frames):
        """Yield (frame, landmarks) for the buffered frames between two results.

        LK-tracked points are the base; linearly interpolated points replace
        failed tracks, and the two are blended with a quadratically increasing
        linear weight toward the end of the buffer to avoid a visible jump at
        the next exact result.
        """
        buffer_length = len(frames)

        lk_interp = self._interpolate_lk(landmarks_from, landmarks_from_frame, frames)
        lin_interp = self._interpolate_lin(landmarks_from, landmarks_to, buffer_length)

        # NOTE(review): lin_interp has buffer_length-1 entries (endpoint dropped
        # in _interpolate_lin), so zip() stops before the final buffered frame —
        # presumably intentional, since the next exact result follows; confirm.
        for frame_index, (frame, lk_result, lin_landmarks) in enumerate(zip(frames, lk_interp, lin_interp)):
            # Use lk landmarks as the base, fall back on linear interpolation when tracking failed
            lk_landmarks, lk_statuses, lk_errors = lk_result
            lk_criteria_mask = np.logical_or(lk_statuses == 0, lk_errors > self._lk_error_thresh)
            # Expand the per-point mask to cover both the x and y components.
            lk_criteria_mask = np.repeat(lk_criteria_mask, 2, axis=1)
            np.putmask(lk_landmarks, lk_criteria_mask, lin_landmarks)

            # Mix the linear interpolation results in towards the end of the buffer, to prevent jitter
            combined = np.stack((lk_landmarks, lin_landmarks), axis=-1)
            mix_factor = (frame_index / buffer_length) ** 2
            weights = np.full(combined.shape, (1-mix_factor, mix_factor))
            mixed_landmarks = np.average(combined, axis=-1, weights=weights)

            yield frame, mixed_landmarks

    # Interpolates linearly
    def _interpolate_lin(self, landmarks_from, landmarks_to, points):
        # [:-1] drops the endpoint (== landmarks_to), which is emitted
        # separately as the next exact result.
        return np.linspace(landmarks_from, landmarks_to, points)[:-1]

    # Interpolates using Lucas-Kanade sparse optical flow
    def _interpolate_lk(self, landmarks, landmarks_frame, frames):
        # Chains cv2.calcOpticalFlowPyrLK frame-to-frame starting from the last
        # exact landmarks, yielding (points, statuses, errors) per frame.
        old_frame = cv2.cvtColor(landmarks_frame, cv2.COLOR_BGR2GRAY)
        old_points = landmarks
        for frame in frames:
            new_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            new_points, statuses, errors = cv2.calcOpticalFlowPyrLK(old_frame, new_frame, old_points, None, **self._lk_params)
            yield new_points, statuses, errors
            old_points = new_points
            old_frame = new_frame
class FaceTracker3000:
    """Top-level face tracking app: captures camera frames, feeds them to the
    detector/interpolator threads, solves head pose from the landmarks, and
    optionally streams the results over TCP as a space-separated float line.
    """

    class CODEC(Enum):
        # what the fuck are these constants
        # (FOURCC codes passed to cv2.CAP_PROP_FOURCC as floats)
        MJPEG = 1196444237.0

    def __init__(self, cam_index, codec, fps, width, height, bufferlen, remote_addr):
        # Queues shared with the worker threads; bounded so slow consumers
        # drop frames instead of growing without limit
        self.frame_queue = Queue(maxsize=bufferlen)
        self.track_queue = Queue()
        self.output_queue = Queue(maxsize=bufferlen)
        self.remote_addr = remote_addr
        # Setup
        self._cap = cv2.VideoCapture(cam_index, cv2.CAP_V4L)
        self._cap.set(cv2.CAP_PROP_FOURCC, codec.value)
        self._cap.set(cv2.CAP_PROP_FPS, fps)
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # Camera tuning; values appear hand-picked for a specific webcam
        self._cap.set(cv2.CAP_PROP_SHARPNESS, 0)
        self._cap.set(cv2.CAP_PROP_GAMMA, 106)
        if remote_addr:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._pose_estimator = PoseEstimator(img_size=(height, width))
        self._gaze_estimator = GazeTracker(8)
        # ml thread
        self._face_detector_thread = FaceDetector(self.frame_queue, self.track_queue)
        self._face_detector_thread.daemon = True
        # interpolator thread
        self._interpolator_thread = Interpolator(self.frame_queue, self.track_queue, self.output_queue)

    def run(self):
        """Blocking main loop; returns only on error or external interrupt."""
        # Initalize some variables (several are placeholders only used by the
        # commented-out debug drawing / the outgoing message)
        mar = 0
        mdst = 0
        volume = 0
        look_horiz, look_vert = (0.5,) * 2
        left_blink, right_blink = (1,) * 2
        left_x, left_y, right_x, right_y = (None,) * 4
        # Start our other threads (will read from our queues)
        self._face_detector_thread.start()
        self._interpolator_thread.start()
        if self.remote_addr:
            self._socket.connect(self.remote_addr)
        try:
            while True:
                # get frame
                success, current_frame = self._cap.read()
                current_frame = cv2.flip(current_frame, 2)
                # Place this frame in the queue if we have room, otherwise drop it
                try:
                    self.frame_queue.put_nowait(current_frame)
                except FullException:
                    pass
                # Display the frames we have ready to go
                if not self.output_queue.empty():
                    frame, landmarks, interpolated = self.output_queue.get_nowait()
                    if landmarks is not None:
                        # Head pose from the 68-point landmark model
                        rotation_vector, translation_vector = self._pose_estimator.solve_pose_by_68_points(landmarks)
                        yaw = -np.degrees(rotation_vector[0])
                        pitch = -np.degrees(rotation_vector[1])
                        roll = -(180+np.degrees(rotation_vector[2]))
                        track_results = self._gaze_estimator.get_gaze(frame, landmarks)
                        if track_results is not None:
                            # (left_x, left_y, left_blink), (right_x, right_y, right_blink), look_horiz, look_vert = track_results
                            left_blink, right_blink = track_results
                        mar = self.mouth_aspect_ratio(landmarks)
                        # mdst = self.mouth_distance(landmarks, facebox)
                        # Wire format: 9 floats, consumed by the remote avatar app
                        msg = '%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' % \
                            (roll, pitch, yaw, look_horiz, look_vert, left_blink, right_blink, mar, mdst)
                        if self.remote_addr:
                            self._socket.send(bytes(msg, "utf-8"))
                    # mark_color = (255, 255, 255) if interpolated else (0, 255, 0)
                    # for mark in landmarks:
                    #     cv2.circle(frame, (int(mark[0]), int(mark[1])), 1, mark_color, -1, cv2.LINE_AA)
                    #
                    # if left_x and left_y and right_x and right_y:
                    #     color = (255, 0, 0)
                    #     cv2.line(frame, (left_x - 5, left_y), (left_x + 5, left_y), color)
                    #     cv2.line(frame, (left_x, left_y - 5), (left_x, left_y + 5), color)
                    #     cv2.line(frame, (right_x - 5, right_y), (right_x + 5, right_y), color)
                    #     cv2.line(frame, (right_x, right_y - 5), (right_x, right_y + 5), color)
                    #
                    # self._pose_estimator.draw_annotation_box(frame, rotation_vector, translation_vector)
                    #
                    # cv2.imshow('tracking debug', frame)
                    # ch = cv2.waitKey(1)
                    # if ch == 27:
                    #     break
        # TODO: Doesnt exit cleanly, getting an error message / python is having to kill the daemon thread
        except Exception as e:
            # Signal the detector thread to stop and unblock its join() on the
            # track queue before re-raising
            self._face_detector_thread.alive = False
            try:
                self.track_queue.task_done()
            except ValueError:
                pass
            self._face_detector_thread.join(1)
            raise e
        # Cleanup our thread / cv2 windows
        finally:
            cv2.destroyAllWindows()

    @staticmethod
    def mouth_distance(landmarks, facebox):
        # Mouth opening normalized by face width
        mouth = landmarks[60:68]
        return np.linalg.norm(mouth[0]-mouth[4]) / (facebox[2]-facebox[0])

    @staticmethod
    def mouth_aspect_ratio(landmarks):
        # Classic MAR: vertical gaps over horizontal span of the inner mouth
        # (points 60-67 of the 68-point model); epsilon avoids div-by-zero
        mouth = landmarks[60:68]
        mar = np.linalg.norm(mouth[1]-mouth[7]) + np.linalg.norm(mouth[2]-mouth[6]) + np.linalg.norm(mouth[3]-mouth[5])
        mar /= (2 * np.linalg.norm(mouth[0]-mouth[4]) + 1e-6)
        return mar
# Shut down every pipeline process: request termination, give each a few
# seconds to exit, then force-kill any that are still alive.
for proc in pipelines:
    proc.terminate()
    proc.join(3)
    if proc.exitcode is None:
        proc.kill()
if __name__ == '__main__':
    # TODO: should come from a configuration file
    # ft = FaceTracker3000(0, 60, 640, 480, 30, ('127.0.0.1', 5066))
    ft = FaceTracker3000(0, FaceTracker3000.CODEC.MJPEG, 30, 1920, 1080, 30, ('127.0.0.1', 5066))
    ft.run()
    # NOTE(review): `main` is not defined in this file - this call looks like
    # it was spliced in from another entry point; confirm before keeping.
    main()

0
ovtk_track/__init__.py Normal file
View File

41
ovtk_track/clock.py Normal file
View File

@ -0,0 +1,41 @@
import logging
import time
from multiprocessing import Process
import threading
logger = logging.getLogger(__name__)
class Clock(Process):
    """Pipeline metronome: releases the shared `clock_pulse` barrier once per
    tick and notifies `buffer_start` at the beginning of every buffer.
    """

    def __init__(self, clock_pulse, rate, buffer_start, buffer_length, percision=0.0005):
        # NOTE(review): `rate` is actually the tick *period* in seconds
        # (Pipeline passes 1/sample_rate); the name is misleading.
        # `percision` (sic) is the sleep granularity of the wait loop.
        super().__init__()
        self.daemon = True
        self._clock_pulse = clock_pulse
        self._buffer_start = buffer_start
        self._buffer_length = buffer_length
        self._rate = rate
        self._percision = percision
        # Aim half a sleep-step early so the average tick lands on the period
        self._target = rate - (percision / 2)

    def run(self):
        i = 0
        start = 0  # 0 makes the first tick fire immediately
        try:
            while True:
                # Coarse busy-wait: sleep in small steps until the period elapses
                while time.perf_counter() - start < self._target:
                    time.sleep(self._percision)
                start = time.perf_counter()
                # Release every component waiting on this tick
                self._clock_pulse.wait()
                # First tick of each buffer: wake anyone waiting for buffer start
                if i % self._buffer_length == 0:
                    with self._buffer_start:
                        self._buffer_start.notify_all()
                i += 1
        except threading.BrokenBarrierError:
            # Barrier aborted (pipeline shutting down) - wake waiters so they
            # can observe the shutdown instead of blocking forever
            with self._buffer_start:
                self._buffer_start.notify_all()
            return 0

16
ovtk_track/enums.py Normal file
View File

@ -0,0 +1,16 @@
from enum import Flag, auto
class DataTypes(Flag):
RGB = auto() # RGB frame
POINTS_2D = auto() # Array of 2d points
POINTS_3D = auto() # Array of 3d points
VECTOR = auto() # A 3d vector
MEASUREMENT = auto() # A number between 0 and 1
DataTypes.ANY = DataTypes.RGB \
| DataTypes.POINTS_2D \
| DataTypes.POINTS_3D \
| DataTypes.VECTOR \
| DataTypes.MEASUREMENT

View File

@ -0,0 +1,33 @@
from multiprocessing import Manager
from queue import Empty as QueueEmptyException
from threading import BrokenBarrierError
from abc import ABC, abstractmethod
from ovtk_track.pipelinecomponent import PipelineComponent
class OutputProcess(PipelineComponent, ABC):
    """Base class for pipeline sinks.

    Subclasses declare an INPUTS dict (field name -> expected type) and
    implement send(), which runs once per clock tick and should drain one
    item from each input queue.
    """

    def __init__(self, *args, allow_empty_queue=False):
        super().__init__(*args)
        # When True the pipeline's "all inputs queued" check is bypassed;
        # only legal for single-input outputs (enforced in Pipeline.setup)
        self.allow_empty_queue = allow_empty_queue
        # One managed queue per declared input, sized to hold two buffers
        self._inputs = {key: Manager().Queue(self._buffer_length * 2) for key in self.__class__.INPUTS.keys()}

    @property
    def inputs(self):
        """Mapping of input field name -> multiprocessing queue (fed by Pipeline)."""
        return self._inputs

    def loop(self):
        while True:
            try:
                # Wait for the shared clock tick, then emit one sample
                self._clock_pulse.wait()
                self.send()
            except QueueEmptyException:
                # An input had no data ready for this tick.
                # (warning() replaces the deprecated warn() alias.)
                self.logger.warning('Buffer underrun!')
            except BrokenBarrierError:
                # Clock barrier aborted -> pipeline is shutting down
                return 0

    @abstractmethod
    def send(self):
        """Consume one tick's worth of input and emit it."""
        pass

View File

@ -0,0 +1,38 @@
import cv2
from . import OutputProcess
from ovtk_track import types
class Process(OutputProcess):
    """Debug sink: shows the frame with landmark/skeleton overlays in an
    OpenCV window. Pressing 'q' raises KeyboardInterrupt to stop the pipeline.
    """

    INPUTS = dict(frame=types.Image,
                  landmarks=types.Landmarks,
                  audio=types.AudioFrame,
                  skeleton=types.Skeleton)

    # The explicit __init__ that only forwarded *args was redundant and has
    # been removed; the inherited OutputProcess.__init__ is identical.

    def setup(self):
        # Nothing to prepare - cv2 creates the window lazily on first imshow
        pass

    def send(self):
        landmarks = self._inputs['landmarks'].get_nowait()
        image = self._inputs['frame'].get_nowait()
        skeleton = self._inputs['skeleton'].get_nowait()
        # Audio is drained but unused, keeping this input in lock-step with
        # the others (previously bound to an unused variable)
        self._inputs['audio'].get_nowait()
        if image is None:
            return
        # Pipeline frames are RGB; OpenCV displays BGR
        frame = cv2.cvtColor(image.pixels, cv2.COLOR_RGB2BGR)
        if landmarks is not None:
            landmarks.draw(image, frame, label=False, color=(130, 130, 130))
        if skeleton is not None:
            skeleton.draw(image, frame)
        cv2.imshow("face", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            raise KeyboardInterrupt('User requested stop')

112
ovtk_track/output/ovtk.py Normal file
View File

@ -0,0 +1,112 @@
from multiprocessing import Process, Pipe
import asyncio
import json
import websockets
import numpy as np
from . import OutputProcess
from ovtk_track import types
from ovtk_track.types.Type import Type
class WebsocketServerProcess(Process):
    """Child process hosting a websocket server.

    Data the parent writes to `pipe` is broadcast to all connected clients;
    JSON messages from clients are forwarded back through the same pipe.
    """

    def __init__(self, bind, port):
        super().__init__()
        self.daemon = True
        self._bind = bind
        self._port = port
        # _pipe stays in this process; _caller_pipe is handed to the parent
        self._pipe, self._caller_pipe = Pipe()
        self.clients = set()

    @property
    def pipe(self):
        """Parent-side end of the duplex pipe."""
        return self._caller_pipe

    # Data input
    async def handle_websocket(self, ws, path):
        self.clients.add(ws)
        try:
            async for message in ws:
                data = json.loads(message)
                self._pipe.send(data)
        except websockets.exceptions.ConnectionClosedError:
            pass
        except asyncio.CancelledError:
            # BUG FIX: close() is a coroutine and previously was not awaited,
            # so the close never actually ran
            await ws.close()
        finally:
            self.clients.remove(ws)

    # Data output
    async def handle_pipe(self, pipe_ready):
        while True:
            # Let other co-routines process until file descriptor is readable
            await pipe_ready.wait()
            pipe_ready.clear()
            # Check if messages exist on the pipe before attempting to recv
            # to avoid accidentally blocking the event loop when file
            # descriptor does stuff we don't expect
            if not self._pipe.poll():
                continue
            data = self._pipe.recv()
            # Send to registered clients. gather() replaces asyncio.wait(),
            # which stopped accepting bare coroutines in Python 3.11;
            # return_exceptions keeps one failing client from raising here.
            if self.clients:
                await asyncio.gather(*(client.send(data) for client in self.clients),
                                     return_exceptions=True)

    def run(self):
        # Setup asyncio websocket server
        start_server = websockets.serve(self.handle_websocket, self._bind, self._port)
        asyncio.get_event_loop().run_until_complete(start_server)
        # Make an awaitable object that flips when the pipe's underlying file descriptor is readable
        pipe_ready = asyncio.Event()
        asyncio.get_event_loop().add_reader(self._pipe.fileno(), pipe_ready.set)
        # Make and start our infinite pipe listener task
        asyncio.get_event_loop().create_task(self.handle_pipe(pipe_ready))
        # Keep the asyncio code running in this thread until explicitly stopped
        asyncio.get_event_loop().run_forever()
def serialze(o):
    """json.dumps `default=` hook: serialize ovtk Types and numpy values.

    Raises NotImplementedError for anything it does not recognise so bad
    payloads fail loudly instead of being silently stringified.
    """
    converters = (
        (Type, lambda value: value.serialize()),
        (np.floating, float),
        (np.integer, int),
        (np.ndarray, lambda value: value.tolist()),
    )
    for kind, convert in converters:
        if isinstance(o, kind):
            return convert(o)
    raise NotImplementedError(f"Could not serialize data of type {o.__class__}")
class Process(OutputProcess):
    """Streams the skeleton plus audio volume to ovtk clients as JSON over a
    websocket served by a dedicated child process."""

    INPUTS = dict(skel=types.Skeleton,
                  audio=types.AudioFrame)

    def __init__(self, *args, bind='localhost', port=5066):
        super().__init__(*args)
        # Must stay non-daemonic: daemonic processes cannot spawn children,
        # and setup() starts the websocket server as a child process
        self.daemon = False
        self._bind = bind
        self._port = port

    def setup(self):
        self.ws_server = WebsocketServerProcess(self._bind, self._port)
        self.ws_server.start()

    def send(self):
        # Drain one sample from each input; skip the tick if either is missing
        skel = self._inputs['skel'].get_nowait()
        audio = self._inputs['audio'].get_nowait()
        if audio is None or skel is None:
            return
        payload = {
            'skel': skel,
            'volume': audio.volume,
        }
        self.ws_server.pipe.send(json.dumps(payload, default=serialze))

38
ovtk_track/output/ovtp.py Normal file
View File

@ -0,0 +1,38 @@
import socket
from . import OutputProcess
from ovtk_track import types
class Process(OutputProcess):
    """Sends head pose + volume over TCP in the legacy 9-float text format."""

    INPUTS = dict(headlook=types.Quaternion,
                  audio=types.AudioFrame)

    def __init__(self, *args, remote='127.0.0.1:5066'):
        super().__init__(*args)
        # "host:port" -> (host, int(port))
        ip, port = remote.split(':')
        self.remote_addr = (ip, int(port))

    def setup(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect(self.remote_addr)

    def send(self):
        # Drain one sample from each input; skip the tick if either is missing
        headlook = self._inputs['headlook'].get_nowait()
        audio = self._inputs['audio'].get_nowait()
        if audio is None or headlook is None:
            return
        pitch, yaw, roll = headlook.as_euler()
        volume = audio.volume
        # Untracked fields are sent as fixed placeholders
        look_horiz, look_vert = (0.5,) * 2
        left_blink, right_blink, mar, mdst = (0,) * 4
        # NOTE(review): roll is sent as literal 0 (computed roll unused) and
        # volume occupies the slot the old tracker used for mouth aspect
        # ratio - looks deliberate, but confirm against the receiver.
        msg = '%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' % \
            (0, pitch, yaw, look_horiz, look_vert, left_blink, right_blink, volume, mdst)
        self._socket.send(bytes(msg, "utf-8"))

262
ovtk_track/pipeline.py Normal file
View File

@ -0,0 +1,262 @@
import logging
import time
from queue import Empty as QueueEmptyException, Full as QueueFullException
from multiprocessing import Process, Manager
from multiprocessing.connection import wait
from threading import BrokenBarrierError
from importlib import import_module
from itertools import chain
from traceback import format_exception
from ovtk_track.clock import Clock
from ovtk_track.transform import TransformProcess
from ovtk_track.provider import ProviderProcess
from ovtk_track.output import OutputProcess
class Pipeline(Process):
    """A configurable processing pipeline running in its own process.

    Assembled from config dicts describing providers (sources), transforms,
    and outputs (sinks); each module runs as its own child process. Data
    moves in fixed-size buffers of `buffer_length` frames at `sample_rate`
    Hz, paced by a shared Clock barrier. Fields are addressed as
    "<module name>:<field name>" strings throughout.
    """

    def __init__(self, name=None, buffer_length=None, sample_rate=None, providers=[], transforms=[], outputs=[]):
        # NOTE(review): mutable default args ([]) are shared between calls;
        # harmless here since they are only iterated, never mutated.
        super().__init__()
        self.logger = logging.getLogger(f'{__name__}.{name}')
        self.buffer_length = buffer_length
        # Stored as the tick *period* in seconds, despite the name
        self.sample_rate = 1/sample_rate
        # Per-field frame buffers, keyed by "<module>:<field>"
        self.transform_buffer = {}
        self.output_buffer = {}
        # producer field -> list of consumer fields
        self.transform_map = {}
        self.output_map = {}
        self.masks = {}
        self.modules = {}
        # NOTE: `type` here shadows the builtin, scoped to this loop only
        for config, type in chain(((provider, 'provider') for provider in providers),
                                  ((transform, 'transform') for transform in transforms),
                                  ((output, 'output') for output in outputs)):
            mod_name = config['name']
            # Load module
            module = import_module(f"ovtk_track.{type}.{config['module']}").Process
            self.modules[mod_name] = (module, config)
            # Note dependencies
            inputs = config.get('inputs')
            if type == 'transform':
                for input_name, from_full in inputs.items():
                    if self.transform_map.get(from_full) is None:
                        self.transform_map[from_full] = []
                    self.transform_map[from_full].append(f'{mod_name}:{input_name}')
            elif type == 'output':
                for input_name, from_full in inputs.items():
                    self.output_buffer[from_full] = []
                    if self.output_map.get(from_full) is None:
                        self.output_map[from_full] = []
                    self.output_map[from_full].append(f'{mod_name}:{input_name}')
            # Resolve the mask if defined (indexing range() normalizes
            # negative indexes to positions within the buffer)
            if mask := config.get('mask'):
                frame_indexes = list(range(self.buffer_length))
                mask = [frame_indexes[frame] for frame in mask]
                # HACK: Being too lazy to fix where they're fetched by module name alone
                self.masks[mod_name] = mask

    def run(self):
        """Main loop: fetch a buffer from providers, push it through
        transforms, sync to the buffer clock, then flush to outputs."""
        try:
            self.setup()
            buffer_ttl = time.perf_counter() + (self.buffer_length * self.sample_rate)
            while True:
                self.logger.info('-------------- fetch --------------')
                self.logger.info(f'Next buffer in: {buffer_ttl - time.perf_counter()}')
                self.fetch()
                self.logger.info('------------ transform ------------')
                self.logger.info(f'Next buffer in: {buffer_ttl - time.perf_counter()}')
                self.transform()
                self.logger.info('-------------- sync ---------------')
                self.logger.info(f'Deadline in: {buffer_ttl - time.perf_counter()}')
                # If we beat the deadline, wait for the clock's buffer-start signal
                if (buffer_ttl - time.perf_counter() > 0):
                    with self.buffer_start:
                        self.buffer_start.wait()
                self.logger.info('-------------- send ---------------')
                buffer_ttl = time.perf_counter() + (self.buffer_length * self.sample_rate)
                self.send()
                for buffer in self._all_buffers:
                    buffer.clear()
        except BrokenBarrierError:
            # Clock barrier aborted by some component -> orderly shutdown
            return 0
        except TimeoutError as e:
            self.logger.error(f'Could not meet deadline during {e} - consider increasing your buffer length or decreasing sample rate')
            return -3
        except RuntimeError as e:
            self.logger.error(f'Encounterd a fatal error during processing: {e}')
            return -2
        except Exception as e:
            self.logger.error('Uncaught exception! This is likely a coding error')
            for step in format_exception(None, e, e.__traceback__):
                for line in step.splitlines():
                    self.logger.error(line)
            return -1
        finally:
            for process in self.processes.values():
                process.terminate()

    def setup(self):
        """Instantiate and start every module process plus the shared clock."""
        # Providers/outputs tick on the clock barrier; transforms do not.
        # +1 accounts for the Clock process itself.
        clock_users = sum(1 for proc, config in self.modules.values() if issubclass(proc, (OutputProcess, ProviderProcess))) + 1
        self.clock_pulse = Manager().Barrier(clock_users)
        self.buffer_start = Manager().Condition()
        self.transform_pipes = []
        self.transform_queues = {}
        self.bufferwise_inputs = set()
        self.pipe_map = {}
        self.provider_queues = {}
        self.provider_shapes = {}
        self.output_queues = {}
        self.processes = {}
        for mod_name, (module, config) in self.modules.items():
            self.logger.debug(f'Setting up {mod_name}')
            # Setup process; remaining config keys become module kwargs
            ready_barrier = Manager().Barrier(2)
            filtered_config = {key: value for key, value in config.items() if key not in ['module', 'name', 'inputs', 'mask']}
            args = [mod_name, ready_barrier, self.clock_pulse, self.buffer_length, self.sample_rate]
            process = module(*args, **filtered_config)
            self.processes[mod_name] = process
            # Make maps to get to the important stuff easier
            if isinstance(process, TransformProcess):
                for output_field_name, pipe in process.outputs.items():
                    full_name = f'{mod_name}:{output_field_name}'
                    self.transform_pipes.append(pipe)
                    self.pipe_map[pipe.fileno()] = full_name
                    # Re-key module-level masks by full field name too
                    if mask := self.masks.get(mod_name):
                        self.masks[full_name] = mask
                for input_field_name, queue in process.inputs.items():
                    full_name = f'{mod_name}:{input_field_name}'
                    self.transform_queues[full_name] = queue
                    if mask := self.masks.get(mod_name):
                        self.masks[full_name] = mask
                    if process.bufferwise:
                        self.bufferwise_inputs.add(full_name)
                        self.transform_buffer[full_name] = []
            elif isinstance(process, ProviderProcess):
                for field_name, queue in process.outputs.items():
                    full_name = f'{mod_name}:{field_name}'
                    self.provider_queues[full_name] = queue
            elif isinstance(process, OutputProcess):
                for field_name, queue in process.inputs.items():
                    full_name = f'{mod_name}:{field_name}'
                    self.output_queues[full_name] = queue
            else:
                raise ValueError(f'Module is not an instance of any base process class: {mod_name}')
            # Do some checks
            if process.bufferwise and self.masks.get(mod_name):
                raise ValueError(f"Cannot mask {config['module']} module at \"{mod_name}\" - module requests whole buffers")
            if isinstance(process, OutputProcess) and process.allow_empty_queue and len(process.inputs) > 1:
                raise ValueError(f"Cannot bypass queue check for {config['module']} module at \"{mod_name}\" - multiple inputs may become desycned otherwise")
            # Start the process
            process.start()
            # Wait for it to become ready
            ready_barrier.wait()
        # Precompute some data for runtime efficiency
        self._all_buffers = list(chain(self.transform_buffer.values(), self.output_buffer.values()))
        self._map_keys = list(chain(self.transform_map.keys(), self.output_map.keys()))
        self._fetch_loop_time_factor = len(self.provider_queues.keys()) / self.buffer_length
        # Start the sync process
        self.clock_process = Clock(self.clock_pulse, self.sample_rate, self.buffer_start, self.buffer_length)
        self.clock_process.start()
        # Clock death (sentinel readable) is watched alongside transform output
        self.watchables = self.transform_pipes + [self.clock_process.sentinel]

    def fetch(self):
        """Pull one buffer from every provider and distribute it."""
        for name_full, queue in self.provider_queues.items():
            # Efficiently read frames from provider queues
            try:
                frames = [None] * self.buffer_length
                for index in range(0, self.buffer_length):
                    frames[index] = queue.get()
            except QueueEmptyException as e:
                raise RuntimeError(f'{name_full} queue was short - excepted {self.buffer_length}, got {index}') from e
            # Push data to transforms
            if wanted_by_transforms := self.transform_map.get(name_full):
                for to_full in wanted_by_transforms:
                    try:
                        if to_full in self.bufferwise_inputs:
                            # Bufferwise transforms get the whole list at once
                            self.transform_queues[to_full].put_nowait(frames)
                        else:
                            if mask := self.masks.get(to_full):
                                for frame in (frames[index] for index in mask):
                                    self.transform_queues[to_full].put_nowait(frame)
                            else:
                                for frame in frames:
                                    self.transform_queues[to_full].put_nowait(frame)
                    except QueueFullException:
                        self.logger.warn(f'{to_full} queue is full - module is running behind!')
            # Push data directly to output buffer
            if wanted_by_outputs := self.output_map.get(name_full):
                for to_full in wanted_by_outputs:
                    # NOTE(review): this ignores `to_full` and *rebinds* the
                    # buffer list (rather than extending it), so the list in
                    # _all_buffers is no longer the one stored here - the old
                    # list gets clear()ed while this entry is simply replaced
                    # on the next fetch. Works, but looks accidental; an
                    # extend() outside the loop seems intended - confirm.
                    self.output_buffer[name_full] = frames

    def transform(self):
        """Collect transform results until every output buffer is full."""
        frame_count = {key: 0 for key in self.pipe_map.values()}
        while any(len(buffer) < self.buffer_length for buffer in self.output_buffer.values()):
            for ready in wait(self.watchables):
                if ready == self.clock_process.sentinel:
                    # Clock died -> treat as pipeline shutdown
                    raise BrokenBarrierError()
                frame = ready.recv()
                from_full = self.pipe_map[ready.fileno()]
                self.logger.debug(f'Got data from {from_full}')
                # For masked producers, pad with None up to the next masked
                # index (or end of buffer) so buffers stay aligned
                fill_frames = []
                if mask := self.masks.get(from_full):
                    gen = (index for index in mask if index >= frame_count[from_full])
                    try:
                        start = next(gen)
                        try:
                            end = next(gen)
                        except StopIteration:
                            end = self.buffer_length
                        fill_frames = [None for _ in range(start + 1, end)]
                    except StopIteration:
                        pass
                frames = [frame] + fill_frames
                if (wanted_by := self.transform_map.get(from_full)) is not None:
                    for to_full in wanted_by:
                        if to_full in self.bufferwise_inputs:
                            # Accumulate until a whole buffer is ready
                            buffer = self.transform_buffer[to_full]
                            buffer.extend(frames)
                            if len(buffer) != self.buffer_length:
                                continue
                            self.transform_queues[to_full].put_nowait(buffer)
                        else:
                            if mask := self.masks.get(to_full):
                                if frame_count[from_full] not in mask:
                                    continue
                            for frame in frames:
                                self.transform_queues[to_full].put_nowait(frame)
                if (buffer := self.output_buffer.get(from_full)) is not None:
                    buffer.extend(frames)
                frame_count[from_full] += len(frames)

    def send(self):
        """Flush every completed output buffer into its sink queues."""
        for from_full, wanted_by in self.output_map.items():
            for output_name in wanted_by:
                try:
                    for frame in self.output_buffer[from_full]:
                        self.output_queues[output_name].put_nowait(frame)
                except QueueFullException:
                    self.logger.warn(f'{output_name} queue is full - module is running behind!')

View File

@ -0,0 +1,53 @@
import logging
from multiprocessing import Process
from abc import ABC, abstractmethod
from traceback import format_exception
class PipelineComponent(Process, ABC):
    """Common base for provider/transform/output module processes.

    Handles the setup handshake (ready barrier), top-level error logging,
    and clock-barrier abort on failure. Subclass bases supply loop();
    concrete modules supply setup() and their per-tick work.
    """

    def __init__(self, name, ready_barrier, clock_pulse, buffer_length, sample_rate):
        super().__init__()
        self.daemon = True
        # Overridden to True by bufferwise transforms
        self.bufferwise = False
        self._clock_pulse = clock_pulse
        self._ready = ready_barrier
        # NOTE(review): Pipeline passes 1/sample_rate here, i.e. this is the
        # tick period in seconds despite the name
        self._sample_rate = sample_rate
        self._buffer_length = buffer_length
        self.name = name
        self.logger = logging.getLogger(f'{__name__}.{name}')

    def _log_error(self, e):
        # Log the full traceback line-by-line through our logger
        for step in format_exception(None, e, e.__traceback__):
            for line in step.splitlines():
                self.logger.error(line)

    def run(self):
        try:
            self.setup()
        except Exception as e:
            self.logger.error('Module failed to setup at runtime')
            self._log_error(e)
            # Abort the ready barrier so the Pipeline's wait() fails fast
            self._ready.abort()
            return -1
        # Signal the Pipeline that this module is ready
        self._ready.wait()
        try:
            self.loop()
        except EOFError:
            self.logger.info('Caught EOF at pipe end - assuming parent process is shutting down')
            return 0
        except Exception as e:
            self.logger.error('Unhandled error')
            self._log_error(e)
            # Break the shared clock so every other component shuts down too
            self._clock_pulse.abort()
            return -1

    @abstractmethod
    def setup(self):
        """One-time initialization, run in the child process before ready."""
        pass

    @abstractmethod
    def loop(self):
        """The component's main loop; returns on shutdown."""
        pass

View File

@ -0,0 +1,28 @@
from multiprocessing import Manager
from threading import BrokenBarrierError
from abc import ABC, abstractmethod
from ovtk_track.pipelinecomponent import PipelineComponent
class ProviderProcess(PipelineComponent, ABC):
    """Base class for pipeline sources.

    Subclasses declare an OUTPUTS dict (field name -> type) and implement
    sample(), which runs once per clock tick and should enqueue one item
    per output.
    """

    def __init__(self, *args):
        super().__init__(*args)
        # One managed queue per declared output, sized to hold two buffers
        self._outputs = {key: Manager().Queue(self._buffer_length * 2) for key in self.__class__.OUTPUTS.keys()}

    @property
    def outputs(self):
        """Mapping of output field name -> multiprocessing queue (read by Pipeline)."""
        return self._outputs

    def loop(self):
        try:
            while True:
                # One sample per shared clock tick
                self._clock_pulse.wait()
                self.sample()
        except BrokenBarrierError:
            # Clock barrier aborted -> pipeline is shutting down
            return 0

    @abstractmethod
    def sample(self):
        """Produce one tick's worth of data onto the output queue(s)."""
        pass
pass

View File

@ -0,0 +1,59 @@
import pyaudio
import numpy as np
from . import ProviderProcess
from ovtk_track import types
class Process(ProviderProcess):
    """Captures microphone audio via PyAudio, one AudioFrame per clock tick."""

    OUTPUTS = dict(audio=types.AudioFrame)

    def __init__(self, *args, channels=1, rate=48000, device=None):
        """
        device: None for the system default, "<api>" for an API's default
                input, or "<api>:<input name>" for a specific device.
        """
        super().__init__(*args)
        self.device = device
        self.channels = channels
        self.rate = rate
        # Samples per clock tick (_sample_rate is the tick period in seconds)
        self.buffer_size = int(self.rate * self._sample_rate)

    def setup(self):
        pa = pyaudio.PyAudio()
        input_index = self.select_device(pa)
        self.cap = pa.open(input=True,
                           input_device_index=input_index,
                           format=pyaudio.paInt16,
                           channels=self.channels,
                           rate=self.rate,
                           frames_per_buffer=self.buffer_size)

    def select_device(self, pa):
        """Resolve self.device to a PyAudio device index (None = system default).

        Raises ValueError when the requested API or input cannot be found.
        """
        if self.device is not None:
            if ':' in self.device:
                host_api_name, input_name = self.device.split(':', 1)
            else:
                host_api_name = self.device
                input_name = None
            for i in range(pa.get_host_api_count()):
                host_api_info = pa.get_host_api_info_by_index(i)
                if host_api_info['name'] == host_api_name:
                    if input_name is None:
                        return host_api_info['defaultInputDevice']
                    else:
                        for j in range(host_api_info['deviceCount']):
                            device_info = pa.get_device_info_by_host_api_device_index(i, j)
                            if device_info['name'] == input_name:
                                return device_info['index']
                        raise ValueError(f'Could not find requested output device: {input_name}')
            else:
                # for-else: no host API matched
                raise ValueError(f'Could not find requested audio API: {host_api_name}')
        else:
            return None

    def sample(self):
        # frombuffer replaces np.fromstring, which has been deprecated for
        # years and removed from recent numpy; it returns a read-only view
        # over the captured bytes, which AudioFrame only reads.
        audio_raw = np.frombuffer(self.cap.read(self.buffer_size), dtype=np.int16)
        # Discard anything still pending so the next tick starts fresh
        self.cap.read(self.cap.get_read_available(), exception_on_overflow=False)
        self._outputs['audio'].put(types.AudioFrame(audio_raw))

View File

@ -0,0 +1,16 @@
from . import ProviderProcess
from ovtk_track.enums import DataTypes
class Process(ProviderProcess):
    """Debug provider emitting a monotonically increasing frame counter.

    NOTE(review): the output is declared DataTypes.RGB but the values are
    plain ints - presumably a placeholder used for pipeline plumbing tests.
    """

    OUTPUTS = dict(index=DataTypes.RGB)

    # The explicit __init__ that only forwarded *args was redundant and has
    # been removed; the inherited ProviderProcess.__init__ is identical.

    def setup(self):
        # Counter starts fresh in the child process
        self.i = 0

    def sample(self):
        self._outputs['index'].put(self.i)
        self.i += 1

View File

@ -0,0 +1,30 @@
import cv2
from . import ProviderProcess
from ovtk_track import types
class Process(ProviderProcess):
    """Reads frames from a video file, one Image per clock tick."""

    OUTPUTS = dict(frame=types.Image)

    def __init__(self, *args, path=None):
        super().__init__(*args)
        if path is None:
            raise ValueError('Missing required arg: path')
        self.path = path

    def setup(self):
        self._cap = cv2.VideoCapture(self.path)
        # Run a blank through to ensure capture works (and to ensure any lazy-loading is done)
        success, frame = self._cap.read()
        if not success:
            # Fail during setup with a clear message instead of crashing
            # later inside cv2 on a None frame
            raise RuntimeError(f'Could not read from video file: {self.path}')

    def sample(self):
        success, frame = self._cap.read()
        # Previously the success flag was ignored, so hitting EOF crashed
        # deep inside cv2.cvtColor with an unhelpful error
        if not success or frame is None:
            raise RuntimeError(f'Failed to read frame (end of file?): {self.path}')
        # Mirror horizontally and convert cv2-native BGR to the pipeline's RGB
        frame = cv2.cvtColor(cv2.flip(frame, 2), cv2.COLOR_BGR2RGB)
        root_origin = types.Point3d(0, 0, 0)
        image = types.Image(frame, 1, root_origin, None)
        self._outputs['frame'].put(image)

View File

@ -0,0 +1,62 @@
import cv2
import logging
from enum import Enum
from . import ProviderProcess
from ovtk_track import types
logger = logging.getLogger(__name__)
# TODO: Calibration
class Process(ProviderProcess):
    """Captures frames from a V4L/webcam device, one Image per clock tick."""

    OUTPUTS = dict(frame=types.Image)

    class CODEC(Enum):
        # what the fuck are these constants
        # (FOURCC codes passed to cv2.CAP_PROP_FOURCC as floats)
        MJPEG = 1196444237.0

    def __init__(self, *args, index=None, fps=None, width=None, height=None,
                 codec='MJPEG', sharpness=None, gamma=None):
        super().__init__(*args)
        if index is None:
            raise ValueError('Missing required arg: index')
        self.index = index
        self.fps = fps
        self.width = width
        self.height = height
        self.codec = self.__class__.CODEC[codec]
        self.sharpness = sharpness
        self.gamma = gamma

    def setup(self):
        self._cap = cv2.VideoCapture(self.index)
        self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        self._cap.set(cv2.CAP_PROP_FOURCC, self.codec.value)
        # Only push settings the user actually asked for
        if self.fps:
            self._cap.set(cv2.CAP_PROP_FPS, self.fps)
        if self.width:
            self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        if self.height:
            self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        if self.gamma:
            self._cap.set(cv2.CAP_PROP_GAMMA, self.gamma)
        if self.sharpness:
            self._cap.set(cv2.CAP_PROP_SHARPNESS, self.sharpness)
        # Run a frame through to ensure any lazy-loading is done
        success, frame = self._cap.read()
        # Previously a failed read crashed on frame.shape with a confusing
        # AttributeError; fail with an actionable message instead
        if not success or frame is None:
            raise RuntimeError(f'Could not read from camera at index {self.index}')
        # Record the dimensions the camera actually delivered
        self.width = frame.shape[1]
        self.height = frame.shape[0]
        # NOTE(review): z=34 is an arbitrary placeholder depth for the root
        # origin ("silly default" per commit history) - needs calibration
        self._root_origin = types.Point3d(self.width / 2, self.height / 2, 34)

    def sample(self):
        success, frame = self._cap.read()
        if not success or frame is None:
            raise RuntimeError(f'Failed to read frame from camera at index {self.index}')
        # Mirror horizontally and convert cv2-native BGR to the pipeline's RGB
        frame = cv2.cvtColor(cv2.flip(frame, 2), cv2.COLOR_BGR2RGB)
        image = types.Image(frame, 1, self._root_origin, None)
        self._outputs['frame'].put(image)

View File

@ -0,0 +1,34 @@
from multiprocessing import Pipe, Manager
from abc import ABC, abstractmethod
from ovtk_track.pipelinecomponent import PipelineComponent
class TransformProcess(PipelineComponent, ABC):
    """Base class for pipeline transforms.

    Subclasses declare INPUTS/OUTPUTS dicts and implement process(). Input
    data arrives on managed queues; results are written to the send end of
    per-output pipes (Pipeline watches the receive ends).
    """

    def __init__(self, *args, bufferwise=False):
        super().__init__(*args)
        # Bufferwise transforms receive a whole buffer (list) at once,
        # so their input queue only needs a single slot
        self.bufferwise = bufferwise
        self._inputs = {key: Manager().Queue(1 if bufferwise else self._buffer_length) for key in self.__class__.INPUTS.keys()}
        # Pipe(False) is one-way: outputs[key] is the receive end (consumed
        # by Pipeline), _outputs[key] the send end (used by process())
        self.outputs = {}
        self._outputs = {}
        for key in self.__class__.OUTPUTS.keys():
            self.outputs[key], self._outputs[key] = Pipe(False)

    @property
    def inputs(self):
        """Mapping of input field name -> multiprocessing queue (fed by Pipeline)."""
        return self._inputs

    def loop(self):
        # (The original `except Exception as e: raise e` clause was a no-op
        # and has been removed - errors still propagate to
        # PipelineComponent.run; the pipes are closed on the way out either way.)
        try:
            while True:
                self.process()
        finally:
            for pipe in self._outputs.values():
                pipe.close()

    @abstractmethod
    def process(self):
        """Consume queued input(s) and send result(s) down the output pipes."""
        pass

View File

@ -0,0 +1,131 @@
import cv2
import numpy as np
from numba import jit
from numba.typed import List
from .. import TransformProcess
from ovtk_track import types
# Interpolates linearly
def interpolate_lin(landmarks_from, landmarks_to, points):
    """Return `points - 1` evenly spaced steps from `landmarks_from` towards
    (and excluding) `landmarks_to`."""
    steps = np.linspace(landmarks_from, landmarks_to, points)
    return steps[:-1]
# Interpolates using Lucas-Kanade sparse optical flow
def interpolate_lk(landmarks, landmarks_frame, frames, lk_params):
    """Track `landmarks` forward through `frames` step by step, yielding the
    (points, statuses, errors) triple from cv2.calcOpticalFlowPyrLK for each
    frame. Each step tracks from the previous frame's result."""
    prev_gray = cv2.cvtColor(landmarks_frame, cv2.COLOR_RGB2GRAY)
    prev_points = landmarks
    for frame in frames:
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        points, statuses, errors = cv2.calcOpticalFlowPyrLK(prev_gray, gray, prev_points, None, **lk_params)
        yield points, statuses, errors
        prev_points, prev_gray = points, gray
# Combine interp methods
def interpolate(landmarks_from, landmarks_from_frame, landmarks_to, frames, error_thresh, lk_params):
    """Yield (frame, landmarks) for every frame in a detection gap.

    Optical-flow (LK) tracking is the base estimate. When a future detection
    exists (landmarks_to is not None), linear interpolation replaces points
    whose LK track failed and is blended in progressively towards the end of
    the gap to avoid a visible jump at the next real detection.
    """
    gap_len = len(frames)
    lk_interp = interpolate_lk(landmarks_from, landmarks_from_frame, frames, lk_params)
    if landmarks_to is not None:
        lin_interp = interpolate_lin(landmarks_from, landmarks_to, gap_len + 1)
    else:
        # No future landmarks (gap runs to end of buffer) - LK only
        lin_interp = [None] * gap_len
    for frame_index, (frame, lk_result, lin_landmarks) in enumerate(zip(frames, lk_interp, lin_interp)):
        lk_landmarks, lk_statuses, lk_errors = lk_result
        if lin_landmarks is not None:
            # Use lk landmarks as the base, fall back on linear interpolation when tracking failed.
            # (BUG FIX: putmask previously ran even when lin_landmarks was
            # None, which crashes as soon as any LK track fails in an
            # open-ended gap.)
            lk_criteria_mask = np.logical_or(lk_statuses == 0, lk_errors > error_thresh)
            lk_criteria_mask = np.repeat(lk_criteria_mask, 2, axis=1)
            np.putmask(lk_landmarks, lk_criteria_mask, lin_landmarks)
            # Mix the linear interpolation results in towards the end of the buffer, to prevent jitter
            combined = np.stack((lk_landmarks, lin_landmarks), axis=-1)
            mix_factor = (frame_index / gap_len) ** 2
            weights = np.full(combined.shape, (1-mix_factor, mix_factor))
            mixed_landmarks = np.average(combined, axis=-1, weights=weights)
            yield frame, mixed_landmarks
        else:
            yield frame, lk_landmarks
@jit(nopython=True)
def get_gaps(data):
    # Yields (start, end) index pairs for runs of falsy entries in `data`.
    #
    # A gap starting at index 0 is suppressed mid-stream (`if last != 0`)
    # because there is no preceding landmark to interpolate from.
    # NOTE(review): a gap that starts at 0 and runs to the *end* IS yielded
    # by the final clause, and the caller then indexes landmarks[from_index-1]
    # == landmarks[-1] (wrap-around) - confirm that is intended.
    last = None
    for index, frame in enumerate(data):
        if frame and last is not None:
            # Truthy entry closes an open gap
            if last != 0:
                yield last, index
            last = None
        elif last is None and not frame:
            # Falsy entry opens a new gap
            last = index
    # Trailing gap running to the end of the buffer
    if last is not None:
        yield last, len(data)
class Process(TransformProcess):
    """Fills gaps (None entries) in a buffer of landmark detections by
    interpolating between the surrounding real detections using optical flow
    blended with linear interpolation."""

    INPUTS = dict(
        frame=types.Image,
        landmark=types.Landmarks,
    )
    OUTPUTS = dict(
        interpolated=types.Landmarks
    )

    def __init__(self, *args, device='cpu'):
        # Operates on whole buffers - needs past and future context
        super().__init__(*args, bufferwise=True)
        # Lucas-Kanade sparse optical flow parameters
        # TODO: learn what these should do and make them easier to configure
        self._lk_params = dict(winSize=(8, 8),
                               maxLevel=2,
                               criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        self._lk_error_thresh = 10

    def setup(self):
        pass

    def process(self):
        frames = self._inputs['frame'].get()
        landmarks = self._inputs['landmark'].get()
        # TODO: Maybe have a configurable fill mode - fill data forward / backward if cannot interp
        landmark_mask = List(landmark is not None for landmark in landmarks)
        gaps = list(get_gaps(landmark_mask))
        # No (or unfillable) gaps in input - noop
        if len(gaps) == 0:
            self.logger.debug('No interpretable gaps in buffer :c')
            for landmark in landmarks:
                self._outputs['interpolated'].send(landmark)
        else:
            last_index = 0
            for from_index, to_index in gaps:
                # Fill any data that comes before this gap
                for landmark in landmarks[last_index:from_index]:
                    self.logger.debug(f'Passing through pre-gap frames {last_index}:{from_index}')
                    self._outputs['interpolated'].send(landmark)
                # Interpolate the gap
                landmarks_from_frame = frames[from_index-1]
                tween_frames = [frame.pixels for frame in frames[from_index:to_index]]
                landmarks_from = np.array(landmarks[from_index-1].project_to_image(landmarks_from_frame))
                if to_index < len(landmarks):
                    # BUG FIX: the target landmarks were previously projected
                    # against the *from* frame (copy-paste of from_index-1)
                    # instead of their own frame
                    landmarks_to_frame = frames[to_index]
                    landmarks_to = np.array(landmarks[to_index].project_to_image(landmarks_to_frame))
                else:
                    # Gap runs to the end of the buffer: no future detection to
                    # interpolate towards; interpolate() falls back to optical
                    # flow only. (BUG FIX: previously landmarks[to_index]
                    # raised IndexError for trailing gaps.)
                    landmarks_to = None
                self.logger.debug(f'interpolating {from_index}:{to_index}')
                interpolated = interpolate(landmarks_from, landmarks_from_frame,
                                           landmarks_to, tween_frames,
                                           self._lk_error_thresh, self._lk_params)
                for frame, interpolated_landmark in interpolated:
                    self._outputs['interpolated'].send(interpolated_landmark)
                last_index = to_index
            # Fill in data that comes after the last gap
            self.logger.debug(f'Filling in remaining frames {last_index}:{len(landmarks)}')
            for landmark in landmarks[last_index:]:
                self._outputs['interpolated'].send(landmark)

View File

@ -0,0 +1,57 @@
import math
from .. import TransformProcess
from ovtk_track import types
class Process(TransformProcess):
    """Quantizes rect coordinates to a coarse grid, with hysteresis near
    grid lines so the output does not jitter between adjacent cells."""

    INPUTS = dict(frame=types.Image, rect=types.Rect)
    OUTPUTS = dict(rect=types.Rect)

    def __init__(self, *args, edge_reject=8, divisions=4):
        """
        :param edge_reject: distance (px) from a grid line within which the
            previous output coordinate is reused instead of re-snapping
        :param divisions: grid cells per frame axis
        """
        super().__init__(*args)
        # BUG FIX: was hard-coded to 8, silently ignoring the edge_reject
        # constructor parameter
        self.edge_reject = edge_reject
        self.divisions = divisions
        self._last = None

    def setup(self):
        pass

    def quant(self, rect, coord, gap):
        """Snap one rect coordinate to the grid of spacing `gap`.

        Top/left coords ('x1'/'y1') round down and bottom/right round up, so
        the quantized rect always contains the input rect. When the value
        sits within edge_reject of a grid line, the previous output (if any)
        is reused to suppress jitter.
        """
        if (coord.endswith('1')):
            roundfn = math.floor
            edge_adjust = -self.edge_reject
        else:
            roundfn = math.ceil
            edge_adjust = self.edge_reject
        pos = rect[coord]
        # Reaches 1.0+ when pos is within edge_reject of a grid line
        edge_fac = abs((pos + edge_adjust) % gap - gap / 2) / (gap / 2 - self.edge_reject)
        if (edge_fac >= 1 and self._last is not None):
            return self._last[coord]
        return roundfn(pos / gap) * gap

    def process(self):
        frame = self._inputs['frame'].get()
        rect = self._inputs['rect'].get()
        if frame is None or rect is None:
            quant_rect = None
        else:
            gap_x = frame.width // self.divisions
            gap_y = frame.height // self.divisions
            x1 = self.quant(rect, 'x1', gap_x)
            x2 = self.quant(rect, 'x2', gap_x)
            y1 = self.quant(rect, 'y1', gap_y)
            y2 = self.quant(rect, 'y2', gap_y)
            try:
                quant_rect = types.Rect(x1, y1, x2, y2)
            except ValueError:
                # Rect collapsed to zero size after quantization
                quant_rect = None
        self._outputs['rect'].send(quant_rect)
        self._last = quant_rect

View File

@ -0,0 +1,151 @@
import math
import numpy as np
from numba import jit
from .. import TransformProcess
from ovtk_track.types import Frame, Landmarks, Scalar
class Process(TransformProcess):
    """Estimates blink ratio and pupil position from an eye image.

    NOTE(review): process() is unimplemented, and `self.width` /
    `self.height` (read by _find_pupils) are never assigned in this class -
    this module appears to be a work in progress.
    """

    INPUTS = dict(frame=Frame, eye_outline=Landmarks)
    OUTPUTS = dict(blink=Scalar)

    def __init__(self, *args, max_iterations=100, ascent_distance=8):
        # max_iterations: cap on hill-climb steps in _find_pupils
        # ascent_distance: half-size of the neighbourhood searched per step
        super().__init__(*args)
        self._max_iterations = max_iterations
        self._ascent_distance = ascent_distance

    def setup(self):
        pass

    def process(self):
        raise NotImplementedError()

    def _get_blink_ratio(self, landmarks):
        """Width/height ratio of the eye outline, or None when height is 0.

        Expects 6 points: index 0 is "left", 3 is "right", 1-2 are averaged
        as "top" and 4-5 as "bottom".
        """
        left = landmarks[0]
        right = landmarks[3]
        top = np.mean(landmarks[1:3], axis=0)
        bottom = np.mean(landmarks[4:6], axis=0)
        eye_width = math.hypot((left[0] - right[0]), (left[1] - right[1]))
        eye_height = math.hypot((top[0] - bottom[0]), (top[1] - bottom[1]))
        try:
            ratio = eye_width / eye_height
        except ZeroDivisionError:
            # Degenerate outline (zero height)
            ratio = None
        return ratio

    # MAGIC: Get pupils via Fabian Timm gradient localization
    # See https://www.inb.uni-luebeck.de/fileadmin/files/PUBPDFS/TiBa11b.pdf
    # and https://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/
    #
    # Mostly copied from from https://github.com/Kryolyz/Pupil_Tracker/blob/master/Pupil_Tracker.py
    @staticmethod
    @jit(nopython=True, cache=True, fastmath=True)
    def _gradx(pic, grad, mean, std):
        # Fills `grad` in place with the thresholded, sign-only horizontal
        # gradient of `pic`. NOTE(review): the mean/std parameters are
        # immediately overwritten locally and never read back by callers.
        if len(grad[:, :]) > 0:
            # Central-difference gradient along x
            for y in range(len(grad[:, 0])):
                for x in range(len(grad[0, :]) - 2):
                    grad[y, x+1] = (float(pic[y, x+2]) - float(pic[y, x])) / 2.0
            mean = np.mean(grad)
            std = np.std(grad)
            # Zero out weak gradients; reduce the rest to their sign
            for y in range(len(grad[:, 0])):
                for x in range(len(grad[0, :])):
                    if grad[y, x] < 0.3 * mean + std and grad[y, x] > - 0.3 * mean - std:
                        grad[y, x] = 0
                    if grad[y, x] > 0:
                        grad[y, x] = 1
                    elif grad[y, x] < 0:
                        grad[y, x] = -1
        return grad

    @staticmethod
    @jit(nopython=True, cache=True, fastmath=True)
    def _evaluate(x, y, gradix, gradiy, func):
        # Scores (x, y) as a pupil-center candidate: for every other pixel,
        # dot the unit displacement vector with the gradient there (clamped
        # to >= 0), then average - see the Timm & Barth paper linked above.
        if len(gradix[:, :]) > 0:
            for cy in range(len(gradix[:, 0])):
                for cx in range(len(gradix[0, :])):
                    if y != cy and x != cx:
                        dy = float(cy - y)
                        dx = float(cx - x)
                        norm = np.linalg.norm(np.array([dx, dy]))
                        dy = dy/norm
                        dx = dx/norm
                        func[cy, cx] = gradiy[cy, cx] * dy + gradix[cy, cx] * dx
                        if func[cy, cx] < 0:
                            func[cy, cx] = 0
        return np.mean(func)

    def _find_pupils(self, frame):
        """Hill-climb the Timm & Barth objective from the image center and
        return the best (x, y) found.

        :param frame: 2d (single channel) image array of the eye region
        NOTE(review): uses self.width / self.height, which are not set
        anywhere in this class - confirm where they are meant to come from.
        """
        mean = 0
        std = 0
        gradix = np.zeros_like(frame, dtype=float)
        self._gradx(frame, gradix, mean, std)
        # Vertical gradient: run the same horizontal kernel on the transpose
        gradiy = np.zeros_like(np.transpose(frame), dtype=float)
        self._gradx(np.transpose(frame), gradiy, mean, std)
        gradiy = np.transpose(gradiy)
        func = np.zeros_like(frame, dtype=float)
        means = np.zeros_like(frame, dtype=float)
        # Start the ascent from the image center
        y = int(self.height / 2)
        x = int(self.width / 2)
        iterations = 0
        while iterations < self._max_iterations:
            # Neighbourhood around the current best, clamped to the image
            ymin = max(y-self._ascent_distance, 0)
            ymax = min(y+self._ascent_distance, self.height)
            xmin = max(x-self._ascent_distance, 0)
            xmax = min(x+self._ascent_distance, self.width)
            should_continue = 0
            for i in np.arange(ymin, ymax):
                for j in np.arange(xmin, xmax):
                    # Score weighted by darkness (pupils are dark); cached in
                    # `means` so each pixel is only evaluated once
                    if means[i, j] < 10:
                        means[i, j] = (255 - frame[i, j]) * self._evaluate(j, i, gradix, gradiy, func)
                    if means[i, j] > means[y, x]:
                        should_continue = 1
                        y = i
                        x = j
            iterations += 1
            # Stop once no neighbour improves on the current best
            if should_continue == 0:
                break
        return x, y
# left_debug = left_eye.frame.copy()
# left_x, left_y = left_eye.pupil_pos
# if left_x and left_y:
# color = (255, 0, 0)
# cv2.line(left_debug, (left_x - 5, left_y), (left_x + 5, left_y), color)
# cv2.line(left_debug, (left_x, left_y - 5), (left_x, left_y + 5), color)
# cv2.imshow('eye test l', left_debug)
#
# right_debug = right_eye.frame.copy()
# right_x, right_y = right_eye.pupil_pos
# if right_x and right_y:
# color = (255, 0, 0)
# cv2.line(right_debug, (right_x - 5, right_y), (right_x + 5, right_y), color)
# cv2.line(right_debug, (right_x, right_y - 5), (right_x, right_y + 5), color)
# cv2.imshow('eye test r', right_debug)
#
# try:
# left_horz_ratio = eye_left.pupil[0] / (eye_left.center[0] * 2 - (self.margin//2))
# left_vert_ratio = eye_left.pupil[1] / (eye_left.center[1] * 2 - (self.margin//2))
# right_horz_ratio = eye_right.pupil[0] / (eye_right.center[0] * 2 - (self.margin//2))
# right_vert_ratio = eye_right.pupil[1] / (eye_right.center[1] * 2 - (self.margin//2))
# except TypeError:
# return None
#
# horz_average = (left_horz_ratio + right_horz_ratio) / 2
# vert_average = (left_vert_ratio + right_vert_ratio) / 2
# return (eye_left.origin[0] + eye_left.pupil[0], eye_left.origin[1] + eye_left.pupil[1], eye_left.blinking), \
# (eye_right.origin[0] + eye_right.pupil[0], eye_right.origin[1] + eye_right.pupil[1], eye_right.blinking), \
# horz_average, vert_average

View File

@ -0,0 +1,46 @@
from ovtk_track.types.Landmarks import LANDMARK_TYPES
# Landmark index groups for mediapipe's 478-point refined face mesh. The
# raw mesh indices follow no discernible ordering scheme, hence these
# hand-built lookup tables.
face_mesh_map = {
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.LEFT: [
        # NOTE(review): 148 and 176 each appear twice below - looks like a
        # typo; verify against the mediapipe face mesh index chart
        152, 148, 176, 148, 176, 149, 150, 136, 138, 215, 177, 137, 227, 34,
        162, 21, 54, 103, 67, 109,
    ],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.RIGHT: [
        10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365,
        379, 378, 400, 377,
    ],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.NOSE | LANDMARK_TYPES.TIP: [
        51, 45, 5, 4, 281, 275
    ],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.EYE | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.LEFT: [
        144, 145, 153, 154, 173, 157, 158, 159, 160, 161, 246, 7, 163
    ],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.EYE | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.RIGHT: [
        374, 373, 390, 249, 466, 388, 387, 386, 385, 384, 398, 381, 380
    ],
    # Iris points only exist when refine_landmarks is enabled (indices 468+)
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.IRIS | LANDMARK_TYPES.CENTER | LANDMARK_TYPES.LEFT: [468],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.IRIS | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.LEFT: [
        469, 470, 471, 472
    ],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.IRIS | LANDMARK_TYPES.CENTER | LANDMARK_TYPES.RIGHT: [473],
    LANDMARK_TYPES.FACE | LANDMARK_TYPES.IRIS | LANDMARK_TYPES.OUTLINE | LANDMARK_TYPES.RIGHT: [
        474, 475, 476, 477,
    ],
}
# Hand landmark indices (unlike the face mesh, these follow an obvious
# scheme: wrist at 0, then four consecutive indices per finger, tip last)
hand_mesh_map = {LANDMARK_TYPES.HAND | LANDMARK_TYPES.WRIST: [0]}
_finger_map = {
    LANDMARK_TYPES.THUMB: list(range(1, 5)),
    LANDMARK_TYPES.INDEX_FINGER: list(range(5, 9)),
    LANDMARK_TYPES.MIDDLE_FINGER: list(range(9, 13)),
    LANDMARK_TYPES.RING_FINGER: list(range(13, 17)),
    LANDMARK_TYPES.PINKY_FINGER: list(range(17, 21)),
}
# Split each finger into its knuckle run and its tip
for _flag, _indices in _finger_map.items():
    *_knuckles, _tip = _indices
    hand_mesh_map[_flag] = _knuckles
    hand_mesh_map[_flag | LANDMARK_TYPES.TIP] = [_tip]

View File

@ -0,0 +1,38 @@
import mediapipe
from ovtk_track.transform import TransformProcess
from ovtk_track import types
class Process(TransformProcess):
    """Finds the most prominent face in a frame with mediapipe
    FaceDetection and emits its bounding box in pixel coordinates
    (None when no frame or no face)."""

    INPUTS = dict(frame=types.Image)
    OUTPUTS = dict(face_region=types.Rect)

    def __init__(self, *args, model_selection=1, min_detection_confidence=0.5):
        super().__init__(*args)
        self.model_selection = model_selection
        self.min_detection_confidence = min_detection_confidence

    def setup(self):
        # Detector is built in setup() rather than __init__
        self._detector = mediapipe.solutions.face_detection.FaceDetection(
            model_selection=self.model_selection,
            min_detection_confidence=self.min_detection_confidence
        )

    def process(self):
        frame = self._inputs['frame'].get()
        region = None
        if frame is not None:
            result = self._detector.process(frame.pixels)
            if result.detections:
                # Mediapipe reports a normalized box; convert to pixels
                box = result.detections[0].location_data.relative_bounding_box
                left = int(box.xmin * frame.width)
                top = int(box.ymin * frame.height)
                right = int((box.xmin + box.width) * frame.width)
                bottom = int((box.ymin + box.height) * frame.height)
                region = types.Rect(left, top, right, bottom)
        self._outputs['face_region'].send(region)

View File

@ -0,0 +1,52 @@
import mediapipe
import numpy as np
from ovtk_track.transform import TransformProcess
from ovtk_track.transform.solve.mediapipe import face_mesh_map
from ovtk_track import types
class Process(TransformProcess):
    """Runs mediapipe FaceMesh on a frame and emits its 478 refined face
    landmarks in pixel space (None when no frame or no face)."""

    INPUTS = dict(frame=types.Image)
    OUTPUTS = dict(landmarks=types.Landmarks)

    def __init__(self, *args, precropped=False, min_tracking_confidence=0.9,
                 min_detection_confidence=0.5):
        super().__init__(*args)
        # precropped frames use static_image_mode (re-detect every frame)
        self.pre_cropped = precropped
        self.min_tracking_confidence = min_tracking_confidence
        self.min_detection_confidence = min_detection_confidence

    def setup(self):
        self._mp_face_mesh = mediapipe.solutions.face_mesh.FaceMesh(
            # BUG FIX: the detection and tracking confidences were swapped
            # when passed through to mediapipe
            min_detection_confidence=self.min_detection_confidence,
            min_tracking_confidence=self.min_tracking_confidence,
            static_image_mode=self.pre_cropped,
            refine_landmarks=True,
        )

    def process(self):
        frame = self._inputs['frame'].get()
        landmarks = None
        if frame is not None:
            results = self._mp_face_mesh.process(frame.pixels)
            if results.multi_face_landmarks:
                raw_landmarks = results.multi_face_landmarks[0].landmark
                np_landmarks = np.empty((478, 3), dtype=np.float32)
                for i in range(478):
                    np_landmarks[i][0] = raw_landmarks[i].x
                    np_landmarks[i][1] = raw_landmarks[i].y
                    np_landmarks[i][2] = raw_landmarks[i].z
                # Mediapipe coords are normalized; scale to pixel space
                np_landmarks *= [frame.width, frame.height, frame.width]
                # TODO: Estimate real distance from iris size
                landmarks = types.Landmarks(types.Point3d.from_imagespace(frame, np_landmarks), face_mesh_map)
        self._outputs['landmarks'].send(landmarks)

View File

@ -0,0 +1,95 @@
import mediapipe
import numpy as np
from ovtk_track.transform import TransformProcess
from ovtk_track.transform.solve.mediapipe import face_mesh_map, hand_mesh_map
from ovtk_track import types
from ovtk_track.types.Landmarks import LANDMARK_TYPES
def mix_maps(point_map, mix_flag):
    """Return a copy of point_map with mix_flag OR-ed into every key."""
    mixed = {}
    for flag, indices in point_map.items():
        mixed[flag | mix_flag] = indices
    return mixed
def combine_maps(maps_array):
    """Merge several landmark maps into one, offsetting each map's indices
    as if the corresponding landmark arrays were concatenated in order.

    :param maps_array: iterable of (points_map, landmarks_length) pairs
    :returns: a single flag -> index-list dict
    """
    combined = {}
    offset = 0
    for points_map, length in maps_array:
        combined.update({flag: [offset + i for i in indices]
                         for flag, indices in points_map.items()})
        offset += length
    return combined
class Process(TransformProcess):
    """Runs mediapipe Holistic on a frame and emits whatever was detected
    (face and hands) merged into a single pixel-space Landmarks value."""

    INPUTS = dict(frame=types.Image)
    OUTPUTS = dict(landmarks=types.Landmarks)

    def __init__(self, *args, model_complexity=1, min_tracking_confidence=0.9,
                 min_detection_confidence=0.5):
        super().__init__(*args)
        self.model_complexity = model_complexity
        self.min_tracking_confidence = min_tracking_confidence
        self.min_detection_confidence = min_detection_confidence

    def setup(self):
        self._mp_holistic = mediapipe.solutions.holistic.Holistic(
            # BUG FIX: the detection and tracking confidences were swapped
            # when passed through to mediapipe
            min_detection_confidence=self.min_detection_confidence,
            min_tracking_confidence=self.min_tracking_confidence,
            model_complexity=self.model_complexity,
            refine_face_landmarks=True,
        )

    @staticmethod
    def _landmarks_to_np(raw_landmarks, count):
        # Copy a mediapipe landmark list into a (count, 3) float32 array
        # (deduplicates the three identical copy loops this had before)
        out = np.empty((count, 3), dtype=np.float32)
        for i in range(count):
            out[i][0] = raw_landmarks[i].x
            out[i][1] = raw_landmarks[i].y
            out[i][2] = raw_landmarks[i].z
        return out

    def process(self):
        frame = self._inputs['frame'].get()
        landmarks = None
        if frame is not None:
            results = self._mp_holistic.process(frame.pixels)
            # Collect (array, type map) pairs for everything detected
            available = []
            if results.face_landmarks:
                # TODO: Estimate real distance from iris size
                available.append((self._landmarks_to_np(results.face_landmarks.landmark, 478),
                                  face_mesh_map))
            if results.left_hand_landmarks:
                available.append((self._landmarks_to_np(results.left_hand_landmarks.landmark, 21),
                                  mix_maps(hand_mesh_map, LANDMARK_TYPES.LEFT)))
            if results.right_hand_landmarks:
                available.append((self._landmarks_to_np(results.right_hand_landmarks.landmark, 21),
                                  mix_maps(hand_mesh_map, LANDMARK_TYPES.RIGHT)))
            if available:
                avail_landmarks, maps = zip(*available)
                combo_map = combine_maps(zip(maps, (array.shape[0] for array in avail_landmarks)))
                np_landmarks = np.concatenate(avail_landmarks, axis=0)
                # Mediapipe coords are normalized; scale to pixel space
                np_landmarks *= [frame.width, frame.height, frame.width]
                landmarks = types.Landmarks(types.Point3d.from_imagespace(frame, np_landmarks), combo_map)
        self._outputs['landmarks'].send(landmarks)

View File

@ -0,0 +1,79 @@
import math
import numpy as np
from .. import TransformProcess
from ovtk_track.types import Quaternion, Point3d
from ovtk_track.types.Landmarks import Landmarks, LANDMARK_TYPES
from ovtk_track.types.Skeleton import Skeleton, Joint, JOINT_TYPES
def vec_perp(vec):
    """Return a vector (as a plain list) perpendicular to `vec`.

    Crosses against whichever axis the input is less aligned with, so the
    result cannot degenerate to zero for a non-zero input.
    """
    x, y, z = vec[0], vec[1], vec[2]
    if abs(z) < abs(x):
        return [y, -x, 0]
    return [0, -z, y]
class Process(TransformProcess):
    """Derives head pose (position + orientation) and per-eye gaze data
    from 3d face landmarks, emitted as a single-joint Skeleton."""

    INPUTS = dict(landmarks=Landmarks)
    OUTPUTS = dict(skel=Skeleton)

    def __init__(self, *args, normal=[0, 0, -1]):
        """
        :param normal: reference "facing the camera" direction
        """
        super().__init__(*args)
        self.normal = np.array(normal, dtype=float)
        # Arbitrary perpendicular used as the roll reference
        self.up = np.array(vec_perp(normal), dtype=float)

    def setup(self):
        pass

    def process(self):
        landmarks = self._inputs['landmarks'].get()
        skeleton = None
        if landmarks:
            joints = {}
            if landmarks.has(LANDMARK_TYPES.FACE):
                # Get head look / pos
                nose = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.NOSE | LANDMARK_TYPES.TIP]).mean(0)
                head_center = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.FACE | LANDMARK_TYPES.OUTLINE]).mean(0)
                look_vec = (nose - head_center)
                eye_center_l = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.EYE | LANDMARK_TYPES.LEFT]).mean(0)
                eye_center_r = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.EYE | LANDMARK_TYPES.RIGHT]).mean(0)
                roll_vec = (eye_center_l - eye_center_r)
                look_vec /= np.linalg.norm(look_vec)
                roll_vec /= np.linalg.norm(roll_vec)
                # NOTE(review): roll_angle is a dot product (a cosine), not
                # an angle in radians, yet it feeds cos/sin below. Preserved
                # as-is - confirm intent before changing.
                roll_angle = np.sum(roll_vec * self.up)
                roll = Quaternion(math.cos(roll_angle), * self.normal * math.sin(roll_angle))
                roll = roll.normalize()
                # Shortest-arc rotation taking look_vec onto the normal
                look = Quaternion(np.dot(look_vec, self.normal), *np.cross(look_vec, self.normal))
                look.w += look.magnitude()
                look = look.normalize()
                # Quaternion __add__ is componentwise; adding then
                # normalizing nlerp-blends the two rotations
                combo = look + roll
                combo = combo.normalize()
                # Get eye data (renamed from `range`, which shadowed the
                # builtin): per-eye bounding-box extents
                marks_left = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.EYE | LANDMARK_TYPES.LEFT])
                marks_right = Landmarks.to_numpy(landmarks[LANDMARK_TYPES.EYE | LANDMARK_TYPES.RIGHT])
                extents = np.array([marks_left.max(axis=0) - marks_left.min(axis=0),
                                    marks_right.max(axis=0) - marks_right.min(axis=0)])
                # Iris offset from the eye center, normalized by eye size
                delta = np.array([eye_center_l - Landmarks.to_numpy(landmarks[LANDMARK_TYPES.IRIS | LANDMARK_TYPES.CENTER | LANDMARK_TYPES.LEFT]).mean(0),
                                  eye_center_r - Landmarks.to_numpy(landmarks[LANDMARK_TYPES.IRIS | LANDMARK_TYPES.CENTER | LANDMARK_TYPES.RIGHT]).mean(0)])
                delta /= extents
                # BUG FIX: numpy division by zero does not raise
                # ZeroDivisionError (it yields inf/nan with a warning), so
                # the old except branch never fired; detect non-finite
                # results instead
                with np.errstate(divide='ignore', invalid='ignore'):
                    eye_aspect_ratio = extents[:, 0] / extents[:, 1]
                if not np.all(np.isfinite(eye_aspect_ratio)):
                    eye_aspect_ratio = None
                head_joint = Joint(Point3d(*head_center), combo, dict(look_delta=delta, eye_aspect_ratio=eye_aspect_ratio))
                joints[JOINT_TYPES.HEAD] = head_joint
            skeleton = Skeleton(joints)
        self._outputs['skel'].send(skeleton)

View File

@ -0,0 +1,34 @@
from .. import TransformProcess
from ovtk_track import types
import numpy as np
class Process(TransformProcess):
    """Crops a frame to a rect, optionally padding the rect by a fixed
    border first; coordinates are clamped to the frame bounds."""

    INPUTS = dict(frame=types.Image, rect=types.Rect)
    OUTPUTS = dict(frame=types.Image)

    def __init__(self, *args, border=None):
        super().__init__(*args)
        self.border = border

    def setup(self):
        pass

    def process(self):
        frame = self._inputs['frame'].get()
        region = self._inputs['rect'].get()
        if frame is None or region is None:
            self._outputs['frame'].send(None)
            return
        if self.border:
            # Grow the crop region by half the border on every side
            pad = self.border // 2
            region = types.Rect(region.x1 - pad, region.y1 - pad,
                                region.x2 + pad, region.y2 + pad)
        # Clamp to the frame so the crop slice stays in bounds
        region.x1, region.x2 = np.clip([region.x1, region.x2], 0, frame.width)
        region.y1, region.y2 = np.clip([region.y1, region.y2], 0, frame.height)
        self._outputs['frame'].send(frame.crop(region))

View File

@ -0,0 +1,17 @@
from .. import TransformProcess
from ovtk_track.enums import DataTypes
class Process(TransformProcess):
    """Pass-through transform: forwards input data unchanged.

    Declares RGB in / POINTS_2D out, so it can stand in for a real solver
    in a pipeline.
    """

    INPUTS = dict(data=DataTypes.RGB)
    OUTPUTS = dict(data=DataTypes.POINTS_2D)

    def __init__(self, *args, device='cpu'):
        super().__init__(*args)

    def setup(self):
        pass

    def process(self):
        self._outputs['data'].send(self._inputs['data'].get())

View File

@ -0,0 +1,21 @@
import typing
from dataclasses import dataclass
import numpy as np
from .Type import Type
@dataclass
class AudioFrame(Type):
    """A buffer of audio samples."""

    # HACK: Should be a np.int16 array but i do not know how to do that
    samples: typing.Any

    @property
    def volume(self):
        """Peak amplitude of the buffer, normalized to the 0.0-1.0 range.

        MAGIC: 2**15 is half the 16 bit range (samples are signed int16).
        """
        return np.max(np.abs(self.samples)) / 2**15

    def serialize(self):
        # BUG FIX: previously `return NotImplementedError()`, which silently
        # handed callers an exception instance instead of raising
        raise NotImplementedError()

50
ovtk_track/types/Image.py Normal file
View File

@ -0,0 +1,50 @@
import typing
from dataclasses import dataclass, field
import cv2
import numpy as np
from .Type import Type
from .Point3d import Point3d
from .Vector3 import Vector3
@dataclass
class Image(Type):
    """A frame of pixels plus the metric-space mapping (scale, origin,
    normal) needed to convert between pixel and world coordinates."""

    pixels: typing.Any
    metric_scale: float
    metric_origin: Point3d
    metric_normal: Vector3
    # Derived from pixels in __post_init__
    width: int = field(init=False)
    height: int = field(init=False)
    camera_matrix: typing.Any = field(init=False, compare=False, repr=False)
    dist_coefs: typing.Any = field(init=False, compare=False, repr=False)

    def __post_init__(self):
        self.height, self.width, _ = self.pixels.shape
        # HACK: Calibration stub - an ideal pinhole camera with focal length
        # equal to the image width, centered principal point, no distortion
        focal_length = self.width
        camera_center = (self.width / 2, self.height / 2)
        self.camera_matrix = np.array(
            [[focal_length, 0, camera_center[0]],
             [0, focal_length, camera_center[1]],
             [0, 0, 1]], dtype="double")
        self.dist_coefs = np.zeros((4, 1))

    def crop(self, rect):
        """Return a new Image of the pixels inside rect, with the metric
        origin shifted so points keep their world-space positions."""
        cropped_pix = self.pixels[rect.as_slices()]
        # HACK: This probably changes based on direction
        cropped_origin = Point3d(rect.x1 - self.metric_origin.x,
                                 rect.y1 - self.metric_origin.y,
                                 self.metric_origin.z)
        return Image(cropped_pix, self.metric_scale, cropped_origin, self.metric_normal)

    def resize(self, scale):
        """Return a copy resized by `scale`, adjusting metric_scale."""
        # BUG FIX: cv2.resize requires the dsize argument (None when sizing
        # via fx/fy), and this referenced the nonexistent attribute
        # `self.direction` instead of `self.metric_normal`
        resized_pix = cv2.resize(self.pixels, None, fx=scale, fy=scale)
        # NOTE(review): confirm the scale adjustment direction - dividing
        # here is preserved from the original, but metric_scale is applied
        # multiplicatively when projecting points to pixels
        return Image(resized_pix, self.metric_scale / scale, self.metric_origin, self.metric_normal)

    def serialize(self):
        # BUG FIX: previously returned the exception instead of raising it
        raise NotImplementedError()

View File

@ -0,0 +1,84 @@
from dataclasses import dataclass
from enum import Flag, auto
import cv2
import numpy as np
from .Type import Type
from .Point3d import Point3d
class LANDMARK_TYPES(Flag):
    """Bit flags tagging groups of landmark indices; combined with `|` to
    describe a group precisely, eg FACE | EYE | OUTLINE | LEFT."""
    # Face
    FACE = auto()
    NOSE = auto()
    EYE = auto()
    IRIS = auto()
    LIPS = auto()
    CHIN = auto()
    # Hand
    HAND = auto()
    WRIST = auto()
    THUMB = auto()
    INDEX_FINGER = auto()
    MIDDLE_FINGER = auto()
    RING_FINGER = auto()
    PINKY_FINGER = auto()
    # Specifiers
    OUTLINE = auto()
    TIP = auto()
    CENTER = auto()
    LEFT = auto()
    RIGHT = auto()
@dataclass
class Landmarks(Type):
    """A list of 3d points plus a map tagging index subsets with
    LANDMARK_TYPES flags."""

    points: list[Point3d]
    map: dict[LANDMARK_TYPES, list[int]]

    @classmethod
    def to_numpy(cls, p3d_iterable):
        """Convert an iterable of Point3d into an (N, 3) float32 array."""
        return np.array([(p.x, p.y, p.z) for p in p3d_iterable], dtype=np.float32)

    @classmethod
    def from_numpy(cls, p3d_np, map=None):
        """Build Landmarks from an (N, 3) array and an optional type map."""
        # BUG FIX: `map={}` was a shared mutable default - every instance
        # built without a map would have shared (and mutated) the same dict
        points = [Point3d(*point) for point in p3d_np]
        return cls(points, {} if map is None else map)

    # Support searching for landmarks of a particular type according to the map
    # via indexing syntax (flags are checked strictly - ie, LEFT | NOSE will
    # only match if the map entry has both flags - use multiple indicies to
    # match many)
    def __getitem__(self, types):
        if not isinstance(types, tuple):
            types = (types,)
        for key, indicies in self.map.items():
            if any(type & key == type for type in types):
                yield from (self.points[index] for index in indicies)

    def has(self, types):
        """Like __getitem__, but just report whether any match exists."""
        if not isinstance(types, tuple):
            types = (types,)
        return any(
            any(type & key == type for type in types)
            for key in self.map
        )

    def draw(self, image, canvas, color=(255, 255, 255), label=True):
        """Draw the landmarks onto `canvas`, projected into `image` space,
        optionally labelling each point with its index."""
        for i, (x, y, z) in enumerate(point.project_to_image(image) for point in self.points):
            # Skip points that project outside the image
            if x > image.width or x < 0 or y > image.height or y < 0:
                continue
            cv2.circle(canvas, (x, y), 1, color, -1, cv2.LINE_AA)
            if label:
                cv2.putText(canvas, str(i), (x + 2, y), cv2.FONT_HERSHEY_PLAIN, 0.5, color)

    def serialize(self):
        # BUG FIX: called the nonexistent `point.serialze()` (typo)
        return [point.serialize() for point in self.points]

View File

@ -0,0 +1,57 @@
from dataclasses import dataclass
import numpy as np
from .Type import Type
@dataclass
class Point3d(Type):
    """A point in metric 3d space."""

    x: float
    y: float
    z: float

    def serialize(self):
        return self.__dict__

    def as_np(self):
        """This point as a (3,) numpy array."""
        return np.array([self.x, self.y, self.z])

    def project_to_image(self, image):
        """Project this metric-space point into integer pixel coordinates
        of `image` (returned as a (3,) int array; z is carried along)."""
        point = self.as_np()
        origin = image.metric_origin.as_np()
        # REVIEW: This assumes that every point is at z 0 (camera sensor) and lens distortion + projection dont exist!
        # need to account for camera optics (cv2.projectPoints) and rotation
        point += origin
        point *= image.metric_scale
        point = np.around(point).astype(int)
        # Space orientation (0, 0 at bottom left) to image orientation (0, 0 at top left)
        point *= [1, -1, 1]
        point += [0, image.height, 0]
        return point

    @classmethod
    def from_imagespace(cls, image, points):
        """Inverse of project_to_image: convert pixel-space coordinates to
        metric space.

        :param points: (3,) array for one point, or (N, 3) for many
        :returns: a Point3d, or a list of Point3d for (N, 3) input
        :raises ValueError: if points is not an ndarray of valid shape
        """
        if not isinstance(points, np.ndarray):
            raise ValueError("Points must be a numpy array")
        dims = len(points.shape)
        if dims > 2 or points.shape[-1] > 3:
            # (Replaces a stray debug print with a diagnostic message)
            raise ValueError(f"Array must be of shape (3) or (x, 3), got {points.shape}")
        # BUG FIX: work on a float copy - the in-place operations below
        # previously mutated the caller's array, and `/=` raised on
        # integer-dtype input
        points = points.astype(float)
        # Image orientation (0, 0 at top left) to space orientation (0, 0 at bottom left)
        points *= [1, -1, 1]
        points += [0, image.height, 0]
        # REVIEW: This assumes that every point is at z 0 (camera sensor) and lens distortion + projection dont exist!
        # need to account for camera optics (cv2.projectPoints) and rotation
        points /= image.metric_scale
        points -= image.metric_origin.as_np()
        if dims == 1:
            return cls(*points)
        else:
            return [cls(*point) for point in points]

View File

@ -0,0 +1,52 @@
from dataclasses import dataclass
import numpy as np
from scipy.spatial.transform import Rotation
from .Type import Type
@dataclass
class Quaternion(Type):
    """A rotation quaternion in (w, x, y, z) order.

    NOTE(review): __mul__ and __add__ are componentwise operations, NOT the
    Hamilton product / rotation composition - callers in this codebase
    combine rotations by adding then normalizing (an nlerp-style blend).
    """

    w: float
    x: float
    y: float
    z: float

    def __mul__(self, q):
        # Componentwise product with another Quaternion, or with a
        # scalar/array via numpy broadcasting
        if isinstance(q, self.__class__):
            product = self.as_np() * q.as_np()
        else:
            product = self.as_np() * q
        return self.__class__(*product)

    def __add__(self, q):
        # Componentwise sum with another Quaternion, or with a
        # scalar/array via numpy broadcasting
        if isinstance(q, self.__class__):
            product = self.as_np() + q.as_np()
        else:
            product = self.as_np() + q
        return self.__class__(*product)

    def as_rotation(self):
        """As a scipy Rotation (scipy expects x, y, z, w order)."""
        return Rotation.from_quat([self.x, self.y, self.z, self.w])

    def as_euler(self):
        """Euler angles in degrees, xyz order."""
        return self.as_rotation().as_euler('xyz', degrees=True)

    def as_np(self):
        # Relies on dataclass field order: w, x, y, z
        return np.array(list(self.__dict__.values()))

    def magnitude(self):
        return np.linalg.norm(self.as_np())

    def normalize(self):
        """Return a new unit-magnitude quaternion."""
        norm = self.as_np() / self.magnitude()
        return self.__class__(*norm)

    def conjugate(self):
        return self.__class__(self.w, -self.x, -self.y, -self.z)

    def draw(self, canvas, origin):
        raise NotImplementedError()

    def serialize(self):
        return self.__dict__

36
ovtk_track/types/Rect.py Normal file
View File

@ -0,0 +1,36 @@
from dataclasses import dataclass
import cv2
from .Type import Type
@dataclass
class Rect(Type):
    """An axis-aligned rectangle. Corners are normalized on construction so
    (x1, y1) is always top-left and (x2, y2) bottom-right."""

    x1: float
    y1: float
    x2: float
    y2: float

    def __post_init__(self):
        if self.x1 == self.x2 or self.y1 == self.y2:
            raise ValueError('Cannot create rect with zero-length dimension')
        # Normalize corner ordering
        self.x1, self.x2 = sorted((self.x1, self.x2))
        self.y1, self.y2 = sorted((self.y1, self.y2))

    def __getitem__(self, key):
        # Coordinate access by name, eg rect['x1']
        return self.__dict__[key]

    def as_slices(self):
        """(row, column) slices suitable for indexing a pixel array."""
        return slice(int(self.y1), int(self.y2)), slice(int(self.x1), int(self.x2))

    def draw(self, canvas, color=(255, 255, 255)):
        cv2.rectangle(canvas,
                      (self.x1, self.y1),
                      (self.x2, self.y2), color, 3)

    def serialize(self):
        return self.__dict__

View File

@ -0,0 +1,61 @@
from dataclasses import dataclass, field
from enum import Enum
import typing
import cv2
from .Type import Type
from .Point3d import Point3d
from .Quaternion import Quaternion
class JOINT_TYPES(Enum):
    """Named skeleton joints; the values are the names used when a
    Skeleton is serialized."""
    HEAD = 'head'
    CHEST = 'chest'
    SHOULDER_L = 'shoulder_l'
    ELBOW_L = 'elbow_l'
    WRIST_L = 'wrist_l'
    HIP_L = 'hip_l'
    KNEE_L = 'knee_l'
    FOOT_L = 'foot_l'
    SHOULDER_R = 'shoulder_r'
    ELBOW_R = 'elbow_r'
    WRIST_R = 'wrist_r'
    HIP_R = 'hip_r'
    KNEE_R = 'knee_r'
    FOOT_R = 'foot_r'
@dataclass
class Joint(Type):
    """A single skeleton joint: position, rotation, and free-form extra
    attributes (excluded from equality comparison and repr)."""

    pos: Point3d
    rot: Quaternion
    attr: dict[str, typing.Any] = field(default_factory=dict, repr=False, compare=False)

    def serialize(self):
        return self.__dict__
@dataclass
class Skeleton(Type):
    """A collection of joints keyed by JOINT_TYPES."""

    # BUG FIX: annotation said `set`, but joints is used as a dict
    # throughout (.values(), .items(), keyed access)
    joints: dict[JOINT_TYPES, Joint]

    def __add__(self, other):
        """Merge two skeletons; `other`'s joints win on conflict."""
        if not isinstance(other, Skeleton):
            # BUG FIX: previously `return ValueError(...)` - the error was
            # handed back to the caller instead of raised
            raise ValueError('Cannot merge Skeleton and non-skeleton')
        # TODO: More intelegent merge
        # BUG FIX: dicts do not support `+`; use dict union instead
        return Skeleton(self.joints | other.joints)

    def draw(self, image, canvas, color=(255, 255, 255)):
        """Draw each joint position onto `canvas` in `image` space."""
        for joint in self.joints.values():
            x, y, z = joint.pos.project_to_image(image)
            # Skip joints that project outside the image
            if x > image.width or x < 0 or y > image.height or y < 0:
                continue
            cv2.circle(canvas, (x, y), 1, color, -1, cv2.LINE_AA)

    def serialize(self):
        return {type.value: joint for type, joint in self.joints.items()}

16
ovtk_track/types/Type.py Normal file
View File

@ -0,0 +1,16 @@
from abc import ABC, abstractmethod
class Type(ABC):
    """Abstract base class for all pipeline data types.

    Subclasses must implement serialize(); draw() and
    __pipeline_getitem__() are optional hooks that raise by default.
    """

    def __init__(self):
        pass

    def __pipeline_getitem__(self, index):
        # NOTE(review): presumably a hook for pipeline-level indexing into
        # values - no caller is visible here, confirm before relying on it
        raise NotImplementedError()

    def draw(self, image, canvas):
        # Optional debug visualization hook
        raise NotImplementedError()

    @abstractmethod
    def serialize(self):
        pass

View File

@ -0,0 +1,16 @@
from dataclasses import dataclass
import numpy as np
from .Type import Type
@dataclass
class Vector3(Type):
    """A 3d direction vector."""

    x: float
    y: float
    z: float

    def normalize(self):
        """Return a new unit-length Vector3 in the same direction."""
        self_list = list(self.__dict__.values())
        norm = self_list / np.linalg.norm(self_list)
        return self.__class__(*norm)

    def serialize(self):
        # BUG FIX: Type declares serialize() as @abstractmethod, so without
        # an implementation Vector3 was abstract and could not be
        # instantiated at all
        return self.__dict__

View File

@ -0,0 +1,9 @@
from .AudioFrame import AudioFrame
from .Image import Image
from .Landmarks import Landmarks
from .Point3d import Point3d
from .Rect import Rect
from .Skeleton import Skeleton
from .Skeleton import Joint
from .Vector3 import Vector3
from .Quaternion import Quaternion