diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..bef8d88 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,64 @@ +on: [push, pull_request] + +name: Continuous integration + +jobs: + check: + name: Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + - uses: actions-rs/cargo@v1 + with: + command: check + + test: + name: Test Suite + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + - uses: actions-rs/cargo@v1 + with: + command: test + + fmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + - uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + clippy: + name: Clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: clippy + - uses: actions-rs/cargo@v1 + with: + command: clippy + args: -- -D warnings \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index be37790..e66df85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "actix" version = "0.7.9" @@ -352,7 +354,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "time", - "url 2.2.1", + "url 2.2.2", ] [[package]] @@ -375,9 +377,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "068a33520e21c1eea89726be4d6b3ce2e6b81046904367e1677287695a043abb" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", ] [[package]] @@ -404,9 +406,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.14.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] @@ -428,9 +430,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] @@ -524,11 +526,12 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ "addr2line", + "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", @@ -559,9 +562,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "brotli-sys" @@ -585,15 +588,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -607,9 +610,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.67" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cfg-if" @@ -662,7 +665,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40" dependencies = [ - "getrandom 0.2.2", + "getrandom 0.2.3", "lazy_static", "proc-macro-hack", "tiny-keccak", @@ -676,9 +679,9 @@ checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536" [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if 1.0.0", ] @@ -694,9 +697,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" dependencies = [ "crossbeam-epoch", "crossbeam-utils 0.7.2", @@ -793,9 +796,9 @@ dependencies = [ [[package]] name = "diesel" -version = "1.4.5" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2de9deab977a153492a1468d1b1c0662c1cf39e5ea87d0c060ecd59ef18d8c" +checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" dependencies = [ "bitflags", "byteorder", @@ -812,9 +815,9 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", ] [[package]] @@ -835,9 +838,9 @@ checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" [[package]] name = "dtoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "either" @@ -847,9 +850,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if 1.0.0", ] @@ -867,9 +870,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ "atty", "humantime", @@ -903,21 +906,30 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", "synstructure", ] +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "filetime" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d34cfa13a63ae058bfa601fe9e313bbdb3746427c1459185464ce0fcf62e1e8" +checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.5", + "redox_syscall 0.2.10", "winapi 0.3.9", ] @@ -957,7 +969,7 @@ dependencies = [ "mpart-async", "num_cpus", "r2d2", - "rand 0.8.3", + "rand 0.8.4", "serde", "serde_derive", "serde_json", @@ -971,9 +983,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" dependencies = [ "cfg-if 1.0.0", "crc32fast", @@ -1071,9 +1083,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1082,9 +1094,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.23.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "h2" @@ -1122,15 +1134,15 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -1160,14 +1172,14 @@ checksum = 
"d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.8", ] [[package]] name = "httparse" -version = "1.3.5" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "humantime" @@ -1188,9 +1200,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -1199,19 +1211,19 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.0.1", - "hashbrown 0.9.1", + "hashbrown 0.11.2", ] [[package]] name = "instant" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", ] @@ -1252,15 +1264,21 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.48" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc9f84f9b115ce7843d60706df1422a916680bfdfcbdb0447c5614ff9d7e4d78" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -1303,9 +1321,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.87" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265d751d31d6780a3f956bb5b8022feba2d94eeee5a84ba64f4212eedca42213" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "linked-hash-map" @@ -1343,9 +1361,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ "scopeguard 1.1.0", ] @@ -1376,9 +1394,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = 
"a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "maybe-uninit" @@ -1388,9 +1406,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" @@ -1417,9 +1435,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9753f12909fd8d923f75ae5c3258cae1ed3c8ec052e1b38c93c21a6d157f789c" dependencies = [ "migrations_internals", - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", ] [[package]] @@ -1485,7 +1503,7 @@ checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", - "miow 0.3.6", + "miow 0.3.7", "winapi 0.3.9", ] @@ -1514,11 +1532,10 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi 0.3.9", ] @@ -1590,9 +1607,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1600,15 +1617,18 @@ dependencies = [ [[package]] name = "object" -version = "0.23.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +dependencies = [ + "memchr", +] [[package]] name = "once_cell" -version = "1.7.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "owning_ref" @@ -1653,13 +1673,13 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", - "lock_api 0.4.2", - "parking_lot_core 0.8.3", + "lock_api 0.4.5", + "parking_lot_core 0.8.5", ] [[package]] @@ -1708,15 +1728,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.2.5", - "smallvec 1.6.1", + "redox_syscall 0.2.10", + "smallvec 1.7.0", "winapi 0.3.9", ] @@ -1745,9 +1765,9 @@ checksum = 
"d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "pq-sys" @@ -1775,11 +1795,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ - "unicode-xid 0.2.1", + "unicode-xid 0.2.2", ] [[package]] @@ -1799,11 +1819,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ - "proc-macro2 1.0.24", + "proc-macro2 1.0.36", ] [[package]] @@ -1813,7 +1833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" dependencies = [ "log", - "parking_lot 0.11.1", + "parking_lot 0.11.2", "scheduled-thread-pool", ] @@ -1864,14 +1884,14 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -1896,12 +1916,12 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -1930,11 +1950,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.2", + "getrandom 0.2.3", ] [[package]] @@ -1957,11 +1977,11 @@ dependencies = [ [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -2034,30 +2054,29 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" 
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.4.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] name = "regex-syntax" -version = "0.6.22" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" @@ -2095,9 +2114,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.18" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc_version" @@ -2110,9 +2129,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "same-file" @@ -2129,7 +2148,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - "parking_lot 0.11.1", + "parking_lot 0.11.2", ] [[package]] @@ -2161,31 +2180,31 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.123" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.123" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", ] [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -2197,9 +2216,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" dependencies = [ "dtoa", - "itoa", + "itoa 0.4.8", "serde", - "url 2.2.1", + "url 2.2.2", ] [[package]] @@ -2210,9 +2229,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] @@ -2230,9 +2249,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" @@ -2245,9 +2264,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "socket2" @@ -2294,37 +2313,37 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.60" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "unicode-xid 0.2.1", + "proc-macro2 1.0.36", + "quote 1.0.14", + "unicode-xid 0.2.2", ] [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", - "unicode-xid 0.2.1", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", + "unicode-xid 0.2.2", ] [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if 1.0.0", + "fastrand", "libc", - "rand 0.8.3", - "redox_syscall 0.2.5", + "redox_syscall 0.2.10", "remove_dir_all", "winapi 0.3.9", ] @@ -2338,15 +2357,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "thread_local" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" -dependencies = [ - "once_cell", -] - [[package]] name = "threadpool" version = "1.8.1" @@ -2378,9 +2388,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2740,9 +2750,9 @@ dependencies = [ [[package]] name = "twoway" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b40075910de3a912adbd80b5d8bad6ad10a23eeb1f5bf9d4006839e899ba5bc" +checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" dependencies = [ "memchr", "unchecked-index", @@ -2760,23 +2770,20 @@ version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.2", + "version_check 0.9.4", ] [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] @@ -2789,9 +2796,9 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -2812,12 +2819,12 @@ dependencies = [ [[package]] name = "url" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ "form_urlencoded", - "idna 0.2.2", + "idna 0.2.3", "matches", "percent-encoding 2.1.0", ] @@ -2847,9 +2854,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2ca2a14bc3fc5b64d188b087a7d3a927df87b152e941ccfbc66672e20c467ae" dependencies = [ "nom", - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", ] [[package]] @@ -2864,9 +2871,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" @@ -2876,15 +2883,15 @@ checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", "winapi 0.3.9", @@ -2905,9 +2912,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.71" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" +checksum = 
"632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -2915,53 +2922,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.71" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.71" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ - "quote 1.0.9", + "quote 1.0.14", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.71" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", + "proc-macro2 1.0.36", + "quote 1.0.14", + "syn 1.0.85", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.71" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.48" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec600b26223b2948cedfde2a0aa6756dcf1fef616f43d7b3097aaf53a6c4d92b" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index d14c4b2..31750fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ version = "0.3.7" authors = ["Alexander Larsson "] license = "MIT OR Apache-2.0" default-run = "flat-manager" +edition = "2021" [lib] name = "flatmanager" @@ -37,7 +38,7 @@ futures = "0.1" futures-fs = "0.0" futures-locks = "0.3" hex = "0.4" -jsonwebtoken = "7" +jwt = {package = "jsonwebtoken", version = "7"} libc = "0.2" log = "0.4" mpart-async = "0.2" diff --git a/src/api.rs b/src/api.rs index 6a3f280..e7fff2c 100644 --- a/src/api.rs +++ b/src/api.rs @@ -1,13 +1,16 @@ use actix::prelude::*; -use actix_web::{error, http}; -use actix_web::{HttpRequest, HttpResponse, Result, ResponseError, web}; -use actix_web::web::{Json, Data, Path}; -use actix_web_actors::ws; use actix_multipart::Multipart; use actix_web::middleware::BodyEncoding; +use actix_web::web::{Data, Json, Path}; +use actix_web::{error, http}; +use actix_web::{web, HttpRequest, HttpResponse, ResponseError, Result}; +use actix_web_actors::ws; +use chrono::Utc; use futures::future; -use futures::future::{Future}; +use futures::future::Future; +use log::warn; +use serde::{Deserialize, Serialize}; use std::cell::RefCell; use std::clone::Clone; use std::env; @@ -20,57 +23,77 @@ use std::path; use std::rc::Rc; use std::sync::Arc; use tempfile::NamedTempFile; -use 
chrono::{Utc}; -use jwt; -use serde::Serialize; - -use app::{Claims,Config}; -use errors::ApiError; -use db::*; -use models::{Job,JobStatus, JobKind,NewBuild,NewBuildRef}; -use tokens::{self, ClaimsValidator}; -use jobs::{ProcessJobs, JobQueue}; + +use crate::app::{Claims, Config}; +use crate::db::*; +use crate::deltas::{DeltaGenerator, RemoteWorker}; +use crate::errors::ApiError; +use crate::jobs::{JobQueue, ProcessJobs}; +use crate::models::{Job, JobKind, JobStatus, NewBuild, NewBuildRef}; +use crate::tokens::{self, ClaimsValidator}; use askama::Template; -use deltas::{DeltaGenerator,RemoteWorker}; -fn init_ostree_repo(repo_path: &path::PathBuf, parent_repo_path: &path::PathBuf, build_id: i32, opt_collection_id: &Option) -> io::Result<()> { +fn init_ostree_repo( + repo_path: &path::Path, + parent_repo_path: &path::Path, + build_id: i32, + opt_collection_id: &Option, +) -> io::Result<()> { let parent_repo_absolute_path = env::current_dir()?.join(parent_repo_path); - for &d in ["extensions", - "objects", - "refs/heads", - "refs/mirrors", - "refs/remotes", - "state", - "tmp/cache"].iter() { + for &d in [ + "extensions", + "objects", + "refs/heads", + "refs/mirrors", + "refs/remotes", + "state", + "tmp/cache", + ] + .iter() + { fs::create_dir_all(repo_path.join(d))?; } unix::fs::symlink(&parent_repo_absolute_path, repo_path.join("parent"))?; let mut file = fs::File::create(repo_path.join("config"))?; - file.write_all(format!( -r#"[core] + file.write_all( + format!( + r#"[core] repo_version=1 mode=archive-z2 min-free-space-size=500MB {}parent={}"#, - match opt_collection_id { - Some(collection_id) => format!("collection-id={}.Build{}\n", collection_id, build_id), - _ => "".to_string(), - }, - parent_repo_absolute_path.display()).as_bytes())?; + match opt_collection_id { + Some(collection_id) => + format!("collection-id={}.Build{}\n", collection_id, build_id), + _ => "".to_string(), + }, + parent_repo_absolute_path.display() + ) + .as_bytes(), + )?; Ok(()) } -fn respond_with_url(data: &T, req: &HttpRequest, name: &str, elements: &[String]) -> Result where +fn respond_with_url( + data: &T, + req: &HttpRequest, + name: &str, + elements: &[String], +) -> Result +where T: Serialize, { - match req.url_for(name, elements.clone()) { + match req.url_for(name, elements) { Ok(url) => Ok(HttpResponse::Ok() - .header(http::header::LOCATION, url.to_string()) - .json(data)), - Err(e) => Err(ApiError::InternalServerError(format!("Can't get url for {} {:?}: {}", name, elements, e.to_string()))), + .header(http::header::LOCATION, url.to_string()) + .json(data)), + Err(e) => Err(ApiError::InternalServerError(format!( + "Can't get url for {} {:?}: {}", + name, elements, e + ))), } } @@ -89,16 +112,23 @@ pub struct TokenSubsetResponse { token: String, } -pub fn repos_is_subset(maybe_subset_repos: &Option>, claimed_repos: &Vec) -> bool { - match maybe_subset_repos { - Some(subset_repos) => subset_repos.iter().all(|subset_repo| tokens::repo_matches_one_claimed(subset_repo, claimed_repos)), +pub fn repos_is_subset(maybe_subset_repos: &Option>, claimed_repos: &[String]) -> bool { + match maybe_subset_repos { + Some(subset_repos) => subset_repos + .iter() + .all(|subset_repo| tokens::repo_matches_one_claimed(subset_repo, claimed_repos)), None => true, } } -pub fn prefix_is_subset(maybe_subset_prefix: &Option>, claimed_prefixes: &Vec) -> bool { - match maybe_subset_prefix { - Some(subset_prefix) => subset_prefix.iter().all(|s| tokens::id_matches_one_prefix(s, &claimed_prefixes)), +pub fn prefix_is_subset( + 
maybe_subset_prefix: &Option>, + claimed_prefixes: &[String], +) -> bool { + match maybe_subset_prefix { + Some(subset_prefix) => subset_prefix + .iter() + .all(|s| tokens::id_matches_one_prefix(s, claimed_prefixes)), None => true, } } @@ -106,33 +136,52 @@ pub fn prefix_is_subset(maybe_subset_prefix: &Option>, claimed_prefi pub fn token_subset( args: Json, config: Data, - req: HttpRequest + req: HttpRequest, ) -> HttpResponse { if let Some(claims) = req.get_claims() { - let new_exp = Utc::now().timestamp().saturating_add(i64::max(args.duration, 0)); - if new_exp <= claims.exp && - tokens::sub_has_prefix (&args.sub, &claims.sub) && - args.scope.iter().all(|s| claims.scope.contains(s)) && - prefix_is_subset(&args.prefixes, &claims.prefixes) && - repos_is_subset(&args.repos, &claims.repos) { - let new_claims = Claims { - sub: args.sub.clone(), - scope: args.scope.clone(), - name: Some(claims.name.unwrap_or("".to_string()) + "/" + &args.name), - prefixes: { if let Some(ref prefixes) = args.prefixes { prefixes.clone() } else { claims.prefixes.clone() } }, - repos: { if let Some(ref repos) = args.repos { repos.clone() } else { claims.repos.clone() } }, - exp: new_exp, - }; - return match jwt::encode(&jwt::Header::default(), &new_claims, &jwt::EncodingKey::from_secret(config.secret.as_ref())) { - Ok(token) => HttpResponse::Ok().json(TokenSubsetResponse{ token: token }), - Err(e) => ApiError::InternalServerError(e.to_string()).error_response() - } - } + let new_exp = Utc::now() + .timestamp() + .saturating_add(i64::max(args.duration, 0)); + if new_exp <= claims.exp + && tokens::sub_has_prefix(&args.sub, &claims.sub) + && args.scope.iter().all(|s| claims.scope.contains(s)) + && prefix_is_subset(&args.prefixes, &claims.prefixes) + && repos_is_subset(&args.repos, &claims.repos) + { + let new_claims = Claims { + sub: args.sub.clone(), + scope: args.scope.clone(), + name: Some(claims.name.unwrap_or_else(|| "".to_string()) + "/" + &args.name), + prefixes: { + if let Some(ref prefixes) = args.prefixes { + prefixes.clone() + } else { + claims.prefixes.clone() + } + }, + repos: { + if let Some(ref repos) = args.repos { + repos.clone() + } else { + claims.repos + } + }, + exp: new_exp, + }; + return match jwt::encode( + &jwt::Header::default(), + &new_claims, + &jwt::EncodingKey::from_secret(config.secret.as_ref()), + ) { + Ok(token) => HttpResponse::Ok().json(TokenSubsetResponse { token }), + Err(e) => ApiError::InternalServerError(e.to_string()).error_response(), + }; + } }; ApiError::NotEnoughPermissions("No token presented".to_string()).error_response() } -#[derive(Deserialize,Debug)] +#[derive(Deserialize, Debug)] pub struct JobPathParams { id: i32, } @@ -150,55 +199,58 @@ pub fn get_job( req: HttpRequest, ) -> impl Future { futures::done(req.has_token_claims("build", "jobs")) - .and_then(move |_| db.lookup_job(params.id, args.log_offset)) + .and_then(move |_| db.lookup_job(params.id, args.log_offset)) .and_then(|job| Ok(HttpResponse::Ok().json(job))) } #[derive(Debug, Serialize, Deserialize)] pub struct CreateBuildArgs { - repo: String + repo: String, } pub fn create_build( args: Json, db: Data, config: Data, - req: HttpRequest -) -> impl Future { + req: HttpRequest, +) -> impl Future { let repo1 = args.repo.clone(); let repo2 = args.repo.clone(); - futures::done(req.has_token_claims("build", "build")) - .and_then(move |_| futures::done(req.has_token_repo(&repo1)) - .and_then(move |_| futures::done(config.get_repoconfig(&repo2).map(|rc| rc.clone())) // Ensure the repo exists - .and_then(move 
|repoconfig| { - db - .new_build ( - NewBuild { - repo: args.repo.clone(), - }) - .and_then(move |build| { - let build_repo_path = config.build_repo_base.join(build.id.to_string()); - let upload_path = build_repo_path.join("upload"); - - init_ostree_repo (&build_repo_path, &repoconfig.path, build.id, &repoconfig.collection_id)?; - init_ostree_repo (&upload_path, &repoconfig.path, build.id, &None)?; - - respond_with_url(&build, &req, "show_build", &[build.id.to_string()]) - }) - }) - )) + futures::done(req.has_token_claims("build", "build")).and_then(move |_| { + futures::done(req.has_token_repo(&repo1)).and_then(move |_| { + futures::done(config.get_repoconfig(&repo2).map(|rc| rc.clone())) // Ensure the repo exists + .and_then(move |repoconfig| { + db.new_build(NewBuild { + repo: args.repo.clone(), + }) + .and_then(move |build| { + let build_repo_path = config.build_repo_base.join(build.id.to_string()); + let upload_path = build_repo_path.join("upload"); + + init_ostree_repo( + &build_repo_path, + &repoconfig.path, + build.id, + &repoconfig.collection_id, + )?; + init_ostree_repo(&upload_path, &repoconfig.path, build.id, &None)?; + + respond_with_url(&build, &req, "show_build", &[build.id.to_string()]) + }) + }) + }) + }) } pub fn builds( db: Data, - req: HttpRequest + req: HttpRequest, ) -> impl Future { futures::done(req.has_token_claims("build", "build")) .and_then(move |_| db.list_builds()) .and_then(move |builds| Ok(HttpResponse::Ok().json(builds))) } - #[derive(Deserialize)] pub struct BuildPathParams { id: i32, @@ -209,11 +261,13 @@ pub fn get_build( db: Data, req: HttpRequest, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "build") - /* We allow getting a build for uploaders too, as it is similar info, and useful */ - .or_else(|_| req.has_token_claims(&format!("build/{}", params.id), "upload"))) - .and_then(move |_| db.lookup_build(params.id)) - .and_then(|build| Ok(HttpResponse::Ok().json(build))) + futures::done( + req.has_token_claims(&format!("build/{}", params.id), "build") + /* We allow getting a build for uploaders too, as it is similar info, and useful */ + .or_else(|_| req.has_token_claims(&format!("build/{}", params.id), "upload")), + ) + .and_then(move |_| db.lookup_build(params.id)) + .and_then(|build| Ok(HttpResponse::Ok().json(build))) } #[derive(Deserialize)] @@ -234,24 +288,29 @@ pub fn get_build_ref( #[derive(Debug, Serialize, Deserialize)] pub struct MissingObjectsArgs { - wanted: Vec + wanted: Vec, } #[derive(Debug, Serialize, Deserialize)] pub struct MissingObjectsResponse { - missing: Vec + missing: Vec, } -fn has_object (build_id: i32, - object: &str, - config: &Data) -> bool -{ +fn has_object(build_id: i32, object: &str, config: &Data) -> bool { let subpath: path::PathBuf = ["objects", &object[..2], &object[2..]].iter().collect(); - let build_path = config.build_repo_base.join(build_id.to_string()).join("upload").join(&subpath); + let build_path = config + .build_repo_base + .join(build_id.to_string()) + .join("upload") + .join(&subpath); if build_path.exists() { true } else { - let parent_path = config.build_repo_base.join(build_id.to_string()).join("parent").join(&subpath); + let parent_path = config + .build_repo_base + .join(build_id.to_string()) + .join("parent") + .join(&subpath); parent_path.exists() } } @@ -267,67 +326,82 @@ pub fn missing_objects( } let mut missing = vec![]; for object in &args.wanted { - if ! 
has_object (params.id, object, &config) { + if !has_object(params.id, object, &config) { missing.push(object.to_string()); } } HttpResponse::Ok() .encoding(http::header::ContentEncoding::Gzip) - .json(MissingObjectsResponse { missing: missing }) + .json(MissingObjectsResponse { missing }) } -fn validate_ref (ref_name: &String, req: &HttpRequest) -> Result<(),ApiError> -{ +fn validate_ref(ref_name: &str, req: &HttpRequest) -> Result<(), ApiError> { let ref_parts: Vec<&str> = ref_name.split('/').collect(); match ref_parts[0] { "screenshots" => { if ref_parts.len() != 2 { - return Err(ApiError::BadRequest(format!("Invalid ref_name {}", ref_name))) + return Err(ApiError::BadRequest(format!( + "Invalid ref_name {}", + ref_name + ))); } Ok(()) - }, + } "app" | "runtime" => { if ref_parts.len() != 4 { - return Err(ApiError::BadRequest(format!("Invalid ref_name {}", ref_name))) + return Err(ApiError::BadRequest(format!( + "Invalid ref_name {}", + ref_name + ))); } req.has_token_prefix(ref_parts[1]) - }, - _ => Err(ApiError::BadRequest(format!("Invalid ref_name {}", ref_name))), + } + _ => Err(ApiError::BadRequest(format!( + "Invalid ref_name {}", + ref_name + ))), } } #[derive(Debug, Serialize, Deserialize)] pub struct CreateBuildRefArgs { - #[serde(rename = "ref")] ref_name: String, + #[serde(rename = "ref")] + ref_name: String, commit: String, } -pub fn create_build_ref ( +pub fn create_build_ref( args: Json, params: Path, db: Data, req: HttpRequest, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "upload") - .and_then(|_| validate_ref(&args.ref_name, &req))) - .and_then(move |_| { - let build_id = params.id; - db - .lookup_build(params.id) - .and_then (move |build| futures::done(req.has_token_repo(&build.repo)) - .and_then (move |_ok| { - db.new_build_ref ( - NewBuildRef { - build_id: build_id, - ref_name: args.ref_name.clone(), - commit: args.commit.clone(), - }) - }) - .and_then(move |buildref| respond_with_url(&buildref, &req, "show_build_ref", - &[params.id.to_string(), buildref.id.to_string()])) - ) + futures::done( + req.has_token_claims(&format!("build/{}", params.id), "upload") + .and_then(|_| validate_ref(&args.ref_name, &req)), + ) + .and_then(move |_| { + let build_id = params.id; + db.lookup_build(params.id).and_then(move |build| { + futures::done(req.has_token_repo(&build.repo)) + .and_then(move |_ok| { + db.new_build_ref(NewBuildRef { + build_id, + ref_name: args.ref_name.clone(), + commit: args.commit.clone(), + }) + }) + .and_then(move |buildref| { + respond_with_url( + &buildref, + &req, + "show_build_ref", + &[params.id.to_string(), buildref.id.to_string()], + ) + }) }) + }) } #[derive(Debug, Serialize, Deserialize)] @@ -335,16 +409,18 @@ pub struct AddExtraIdsArgs { ids: Vec, } -fn validate_id (id: &String) -> Result<(),ApiError> -{ - if !id.split('.').all(|element| element.len() > 0 && element.chars().all(|ch| ch.is_alphanumeric())) { +fn validate_id(id: &str) -> Result<(), ApiError> { + if !id + .split('.') + .all(|element| !element.is_empty() && element.chars().all(|ch| ch.is_alphanumeric())) + { Err(ApiError::BadRequest(format!("Invalid extra id {}", id))) } else { Ok(()) } } -pub fn add_extra_ids ( +pub fn add_extra_ids( args: Json, params: Path, db: Data, @@ -352,46 +428,46 @@ pub fn add_extra_ids ( ) -> impl Future { let ids = args.ids.clone(); futures::done(req.has_token_claims(&format!("build/{}", params.id), "upload")) - .and_then (move |_| ids.iter().try_for_each(|id| validate_id(id))) + .and_then(move |_| 
ids.iter().try_for_each(|id| validate_id(id))) .and_then(move |_| { let req2 = req.clone(); let build_id = params.id; - db - .lookup_build(params.id) - .and_then (move |build| { + db.lookup_build(params.id) + .and_then(move |build| { /* Validate token */ req2.has_token_repo(&build.repo) }) - .and_then (move |_ok| db.add_extra_ids(build_id, args.ids.clone())) - .and_then(move |build| respond_with_url(&build, &req, "show_build", - &[build_id.to_string()])) + .and_then(move |_ok| db.add_extra_ids(build_id, args.ids.clone())) + .and_then(move |build| { + respond_with_url(&build, &req, "show_build", &[build_id.to_string()]) + }) }) } - fn is_all_lower_hexdigits(s: &str) -> bool { - !s.contains(|c: char| !(c.is_digit(16) && !c.is_uppercase())) + !s.contains(|c: char| !c.is_digit(16) || c.is_uppercase()) } fn filename_parse_object(filename: &str) -> Option { - let v: Vec<&str> = filename.split(".").collect(); + let v: Vec<&str> = filename.split('.').collect(); if v.len() != 2 { - return None + return None; } if v[0].len() != 64 || !is_all_lower_hexdigits(v[0]) { - return None + return None; } - if v[1] != "dirmeta" && - v[1] != "dirtree" && - v[1] != "filez" && - v[1] != "commit" { - return None - } + if v[1] != "dirmeta" && v[1] != "dirtree" && v[1] != "filez" && v[1] != "commit" { + return None; + } - Some(path::Path::new("objects").join(&filename[..2]).join(&filename[2..])) + Some( + path::Path::new("objects") + .join(&filename[..2]) + .join(&filename[2..]), + ) } fn is_all_digits(s: &str) -> bool { @@ -406,37 +482,42 @@ fn is_all_digits(s: &str) -> bool { * sdm_iU8hHZYwDpmzYBAP6cJQ5MX5VLxoGF+j+Q1OGPQ.0.delta */ fn filename_parse_delta(name: &str) -> Option { - let v: Vec<&str> = name.split(".").collect(); + let v: Vec<&str> = name.split('.').collect(); if v.len() != 3 { - return None + return None; } if v[2] != "delta" { - return None + return None; } if v[1] != "superblock" && !is_all_digits(v[1]) { - return None + return None; } - if !(v[0].len() == 43 || - (v[0].len() == 87 && v[0].chars().nth(43) == Some('-'))) { - return None + if !(v[0].len() == 43 || (v[0].len() == 87 && v[0].chars().nth(43) == Some('-'))) { + return None; } - Some(path::Path::new("deltas") - .join(&v[0][..2]) - .join(&v[0][2..]) - .join(&v[1])) + Some( + path::Path::new("deltas") + .join(&v[0][..2]) + .join(&v[0][2..]) + .join(&v[1]), + ) } -fn get_upload_subpath(field: &actix_multipart::Field, - state: &Arc) -> error::Result { - let cd = field.content_disposition().ok_or( - ApiError::BadRequest("No content disposition for multipart item".to_string()))?; - let filename = cd.get_filename().ok_or( - ApiError::BadRequest("No filename for multipart item".to_string()))?; +fn get_upload_subpath( + field: &actix_multipart::Field, + state: &Arc, +) -> error::Result { + let cd = field.content_disposition().ok_or_else(|| { + ApiError::BadRequest("No content disposition for multipart item".to_string()) + })?; + let filename = cd + .get_filename() + .ok_or_else(|| ApiError::BadRequest("No filename for multipart item".to_string()))?; // We verify the format below, but just to make sure we never allow anything like a path if filename.contains('/') { return Err(ApiError::BadRequest("Invalid upload filename".to_string())); @@ -444,12 +525,12 @@ fn get_upload_subpath(field: &actix_multipart::Field, if !state.only_deltas { if let Some(path) = filename_parse_object(filename) { - return Ok(path) + return Ok(path); } } if let Some(path) = filename_parse_delta(filename) { - return Ok(path) + return Ok(path); } 
Err(ApiError::BadRequest("Invalid upload filename".to_string())) @@ -461,10 +542,9 @@ struct UploadState { } fn start_save( - subpath: &path::PathBuf, + subpath: &path::Path, state: &Arc, -) -> Result<(NamedTempFile,path::PathBuf)> { - +) -> Result<(NamedTempFile, path::PathBuf)> { let absolute_path = state.repo_path.join(subpath); if let Some(parent) = absolute_path.parent() { @@ -480,14 +560,14 @@ fn start_save( fn save_file( field: actix_multipart::Field, - state: &Arc + state: &Arc, ) -> Box> { - let repo_subpath = match get_upload_subpath (&field, state) { + let repo_subpath = match get_upload_subpath(&field, state) { Ok(subpath) => subpath, Err(e) => return Box::new(future::err(e)), }; - let (named_file, object_file) = match start_save (&repo_subpath, state) { + let (named_file, object_file) = match start_save(&repo_subpath, state) { Ok((named_file, object_file)) => (named_file, object_file), Err(e) => return Box::new(future::err(ApiError::InternalServerError(e.to_string()))), }; @@ -498,7 +578,8 @@ fn save_file( Box::new( field .fold(0i64, move |acc, bytes| { - let rt = shared_file.borrow_mut() + let rt = shared_file + .borrow_mut() .write_all(bytes.as_ref()) .map(|_| acc + bytes.len() as i64) .map_err(|e| { @@ -506,10 +587,8 @@ fn save_file( }); future::result(rt) }) - .map_err(|e| { - ApiError::InternalServerError(e.to_string()) - }) - .and_then (move |res| { + .map_err(|e| ApiError::InternalServerError(e.to_string())) + .and_then(move |res| { // persist consumes the named file, so we need to // completely move it out of the shared Rc+RefCell let named_file = Rc::try_unwrap(shared_file2).unwrap().into_inner(); @@ -525,8 +604,8 @@ fn save_file( warn!("Can't get permissions on uploaded file"); }; future::result(Ok(res)) - }, - Err(e) => future::err(ApiError::InternalServerError(e.to_string())) + } + Err(e) => future::err(ApiError::InternalServerError(e.to_string())), } }), ) @@ -539,28 +618,29 @@ pub fn upload( db: Data, config: Data, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "upload")) - .and_then(move |_| { + futures::done(req.has_token_claims(&format!("build/{}", params.id), "upload")).and_then( + move |_| { let uploadstate = Arc::new(UploadState { only_deltas: false, - repo_path: config.build_repo_base.join(params.id.to_string()).join("upload") + repo_path: config + .build_repo_base + .join(params.id.to_string()) + .join("upload"), }); let req2 = req.clone(); - db - .lookup_build(params.id) - .and_then (move |build| req2.has_token_repo(&build.repo)) - .and_then (move |_ok| { + db.lookup_build(params.id) + .and_then(move |build| req2.has_token_repo(&build.repo)) + .and_then(move |_ok| { multipart .map_err(|e| ApiError::InternalServerError(e.to_string())) - .map(move |field| { - save_file(field, &uploadstate).into_stream() - }) + .map(move |field| save_file(field, &uploadstate).into_stream()) .flatten() .collect() .map(|sizes| HttpResponse::Ok().json(sizes)) .from_err() }) - }) + }, + ) } pub fn get_commit_job( @@ -570,7 +650,7 @@ pub fn get_commit_job( req: HttpRequest, ) -> impl Future { futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")) - .and_then(move |_| db.lookup_commit_job(params.id, args.log_offset)) + .and_then(move |_| db.lookup_commit_job(params.id, args.log_offset)) .and_then(|job| Ok(HttpResponse::Ok().json(job))) } @@ -588,24 +668,26 @@ pub fn commit( db: Data, req: HttpRequest, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")) - .and_then(move 
|_| { + futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")).and_then( + move |_| { let req2 = req.clone(); let build_id = params.id; - db - .lookup_build (build_id) - .and_then (move |build| req2.has_token_repo(&build.repo)) - .and_then (move |_ok| { - db.start_commit_job(build_id, - args.endoflife.clone(), - args.endoflife_rebase.clone(), - args.token_type) + db.lookup_build(build_id) + .and_then(move |build| req2.has_token_repo(&build.repo)) + .and_then(move |_ok| { + db.start_commit_job( + build_id, + args.endoflife.clone(), + args.endoflife_rebase.clone(), + args.token_type, + ) }) .and_then(move |job| { job_queue.do_send(ProcessJobs(None)); respond_with_url(&job, &req, "show_commit_job", &[params.id.to_string()]) }) - }) + }, + ) } pub fn get_publish_job( @@ -615,13 +697,12 @@ pub fn get_publish_job( req: HttpRequest, ) -> impl Future { futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")) - .and_then(move |_| db.lookup_publish_job(params.id, args.log_offset)) + .and_then(move |_| db.lookup_publish_job(params.id, args.log_offset)) .and_then(|job| Ok(HttpResponse::Ok().json(job))) } #[derive(Debug, Serialize, Deserialize)] -pub struct PublishArgs { -} +pub struct PublishArgs {} pub fn publish( _args: Json, @@ -630,25 +711,30 @@ pub fn publish( db: Data, req: HttpRequest, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "publish")) - .and_then(move |_| { + futures::done(req.has_token_claims(&format!("build/{}", params.id), "publish")).and_then( + move |_| { let build_id = params.id; let req2 = req.clone(); - db - .lookup_build(build_id) - .and_then (move |build| { + db.lookup_build(build_id) + .and_then(move |build| { req2.has_token_repo(&build.repo)?; Ok(build) }) - .and_then (move |build| { + .and_then(move |build| { db.start_publish_job(build_id, build.repo.clone()) .and_then(move |job| { job_queue.do_send(ProcessJobs(Some(build.repo))); - respond_with_url(&job, &req, "show_publish_job", &[params.id.to_string()]) + respond_with_url( + &job, + &req, + "show_publish_job", + &[params.id.to_string()], + ) }) }) - }) + }, + ) } pub fn purge( @@ -657,28 +743,30 @@ pub fn purge( config: Data, req: HttpRequest, ) -> impl Future { - futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")) - .and_then (move |_| { + futures::done(req.has_token_claims(&format!("build/{}", params.id), "build")).and_then( + move |_| { let build_repo_path = config.build_repo_base.join(params.id.to_string()); let build_id = params.id; let req2 = req.clone(); let db2 = db.clone(); - db - .lookup_build (build_id) - .and_then (move |build| req2.has_token_repo(&build.repo)) - .and_then (move |_ok| db.init_purge(build_id)) + db.lookup_build(build_id) + .and_then(move |build| req2.has_token_repo(&build.repo)) + .and_then(move |_ok| db.init_purge(build_id)) .and_then(move |_ok| { let res = fs::remove_dir_all(&build_repo_path); - db2.finish_purge (build_id, - match res { - Ok(()) => None, - Err(e) => Some(e.to_string()), - }) + db2.finish_purge( + build_id, + match res { + Ok(()) => None, + Err(e) => Some(e.to_string()), + }, + ) }) .and_then(move |build| { respond_with_url(&build, &req, "show_build", &[build_id.to_string()]) }) - }) + }, + ) } #[derive(Template)] @@ -696,10 +784,11 @@ struct JobStatusData { fn job_status_data(job: Job) -> JobStatusData { JobStatusData { id: job.id, - kind: JobKind::from_db(job.kind).map_or ("Unknown".to_string(), |k| format! 
("{:?}", k)), - status: JobStatus::from_db(job.status).map_or ("Unknown".to_string(), |s| format! ("{:?}", s)), + kind: JobKind::from_db(job.kind).map_or("Unknown".to_string(), |k| format!("{:?}", k)), + status: JobStatus::from_db(job.status) + .map_or("Unknown".to_string(), |s| format!("{:?}", s)), contents: job.contents, - results: job.results.unwrap_or("".to_string()), + results: job.results.unwrap_or_default(), log: job.log, finished: job.status >= JobStatus::Ended as i16, } @@ -709,12 +798,10 @@ pub fn job_status( params: Path, db: Data, ) -> impl Future { - db - .lookup_job(params.id, None) - .and_then(move |job| { - let s = job_status_data(job).render().unwrap(); - Ok(HttpResponse::Ok().content_type("text/html").body(s)) - }) + db.lookup_job(params.id, None).and_then(move |job| { + let s = job_status_data(job).render().unwrap(); + Ok(HttpResponse::Ok().content_type("text/html").body(s)) + }) } #[derive(Template)] @@ -724,18 +811,16 @@ struct Status { version: String, } -pub fn status( - db: Data, -) -> impl Future { - db - .list_active_jobs() - .and_then(move |jobs| { - let s = Status { - jobs: jobs.into_iter().map(job_status_data).collect(), - version: env!("CARGO_PKG_VERSION").to_string(), - }.render().unwrap(); - Ok(HttpResponse::Ok().content_type("text/html").body(s)) - }) +pub fn status(db: Data) -> impl Future { + db.list_active_jobs().and_then(move |jobs| { + let s = Status { + jobs: jobs.into_iter().map(job_status_data).collect(), + version: env!("CARGO_PKG_VERSION").to_string(), + } + .render() + .unwrap(); + Ok(HttpResponse::Ok().content_type("text/html").body(s)) + }) } #[derive(Deserialize)] @@ -754,28 +839,34 @@ pub fn delta_upload( .and_then(move |repoconfig| { let uploadstate = Arc::new(UploadState { only_deltas: true, - repo_path: repoconfig.get_abs_repo_path().clone() + repo_path: repoconfig.get_abs_repo_path(), }); multipart .map_err(|e| ApiError::InternalServerError(e.to_string())) - .map(move |field| { save_file(field, &uploadstate).into_stream() }) + .map(move |field| save_file(field, &uploadstate).into_stream()) .flatten() .collect() .map(|sizes| HttpResponse::Ok().json(sizes)) }) } -pub fn ws_delta(req: HttpRequest, - config: Data, - delta_generator: Data>, - stream: web::Payload) -> Result { +pub fn ws_delta( + req: HttpRequest, + config: Data, + delta_generator: Data>, + stream: web::Payload, +) -> Result { if let Err(e) = req.has_token_claims("delta", "generate") { - return Ok(e.error_response()) + return Ok(e.error_response()); } - let remote = req.connection_info().remote().unwrap_or("Unknown").to_string(); + let remote = req + .connection_info() + .remote() + .unwrap_or("Unknown") + .to_string(); ws::start( RemoteWorker::new(&config, &delta_generator, remote), &req, - stream + stream, ) } diff --git a/src/app.rs b/src/app.rs index adcb257..876219f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,35 +1,30 @@ use actix::prelude::*; -use actix_web::{self, http, web, middleware, App, HttpRequest, HttpResponse, HttpServer}; -use actix_web::error::{ErrorNotFound,ErrorBadRequest}; -use actix_web::dev::Server; use actix_files::NamedFile; -use actix_web::http::header::{CACHE_CONTROL, HeaderValue}; +use actix_service::Service; +use actix_web::dev::Server; +use actix_web::error::{ErrorBadRequest, ErrorNotFound}; +use actix_web::http::header::{HeaderValue, CACHE_CONTROL}; use actix_web::web::Data; use actix_web::Responder; -use actix_service::{Service}; -use std::path::PathBuf; -use std::path::Path; -use std::ffi::OsStr; -use std::sync::Arc; +use actix_web::{self, 
http, middleware, web, App, HttpRequest, HttpResponse, HttpServer}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::ffi::OsStr; use std::io; -use std; -use std::process::{Command}; -use serde; -use serde_json; -use serde::Deserialize; -use base64; -use num_cpus; - -use errors::ApiError; -use api; -use deltas::DeltaGenerator; -use tokens::{TokenParser, ClaimsValidator}; -use jobs::{JobQueue}; -use logger::Logger; -use ostree; -use Pool; -use db::Db; +use std::path::Path; +use std::path::PathBuf; +use std::process::Command; +use std::sync::Arc; + +use crate::api; +use crate::db::Db; +use crate::deltas::DeltaGenerator; +use crate::errors::ApiError; +use crate::jobs::JobQueue; +use crate::logger::Logger; +use crate::ostree; +use crate::tokens::{ClaimsValidator, TokenParser}; +use crate::Pool; // Ensure we strip out .. and other risky things to avoid escaping out of the base dir fn canonicalize_path(path: &str) -> Result { @@ -62,26 +57,27 @@ fn canonicalize_path(path: &str) -> Result { Ok(buf) } -fn from_base64<'de,D>(deserializer: D) -> Result, D::Error> - where D: serde::Deserializer<'de> +fn from_base64<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, { use serde::de::Error; String::deserialize(deserializer) .and_then(|string| base64::decode(&string).map_err(|err| Error::custom(err.to_string()))) } -fn from_opt_base64<'de,D>(deserializer: D) -> Result>, D::Error> - where D: serde::Deserializer<'de> +fn from_opt_base64<'de, D>(deserializer: D) -> Result>, D::Error> +where + D: serde::Deserializer<'de>, { use serde::de::Error; String::deserialize(deserializer) .and_then(|string| base64::decode(&string).map_err(|err| Error::custom(err.to_string()))) - .map(|s| Some(s)) + .map(Some) } -fn match_glob(glob: &str, s: &str) -> bool -{ - if let Some(index) = glob.find("*") { +fn match_glob(glob: &str, s: &str) -> bool { + if let Some(index) = glob.find('*') { let (glob_start, glob_rest) = glob.split_at(index); if !s.starts_with(glob_start) { return false; @@ -97,20 +93,20 @@ fn match_glob(glob: &str, s: &str) -> bool /* Consume at least one, fail if none */ if s_chars.next() == None { - return false + return false; } loop { if match_glob(glob_after_star, s_chars.as_str()) { - return true + return true; } if s_chars.next() == None { - break + break; } } - return false + false } else { - return glob == s + glob == s } } @@ -173,9 +169,12 @@ pub struct DeltaConfig { impl DeltaConfig { pub fn matches_ref(&self, id: &str, arch: &str) -> bool { - self.id.iter().any(|id_glob| match_glob(id_glob, id)) && - (self.arch.is_empty() || - self.arch.iter().any(|arch_glob| match_glob(arch_glob, arch))) + self.id.iter().any(|id_glob| match_glob(id_glob, id)) + && (self.arch.is_empty() + || self + .arch + .iter() + .any(|arch_glob| match_glob(arch_glob, arch))) } } @@ -266,7 +265,7 @@ impl RepoConfig { pub fn get_base_url(&self, config: &Config) -> String { match &self.base_url { Some(base_url) => base_url.clone(), - None => format!("{}/repo/{}", config.base_url, self.name) + None => format!("{}/repo/{}", config.base_url, self.name), } } @@ -278,13 +277,13 @@ impl RepoConfig { } else if ref_name.starts_with("appstream2/") { self.appstream_delta_depth /* This updates often, so lets have some more */ } else if ref_name.starts_with("app/") || ref_name.starts_with("runtime/") { - let parts : Vec<&str> = ref_name.split("/").collect(); + let parts: Vec<&str> = ref_name.split('/').collect(); if parts.len() == 4 { let id = parts[1]; let arch = parts[2]; for dc 
in &self.deltas { if dc.matches_ref(id, arch) { - return dc.depth + return dc.depth; } } }; @@ -297,30 +296,32 @@ impl RepoConfig { impl Config { pub fn get_repoconfig(&self, name: &str) -> Result<&RepoConfig, ApiError> { - self.repos.get(name).ok_or_else (|| ApiError::BadRequest("No such repo".to_string())) + self.repos + .get(name) + .ok_or_else(|| ApiError::BadRequest("No such repo".to_string())) } pub fn get_repoconfig_from_path(&self, path: &Path) -> Result<&RepoConfig, ApiError> { for (repo, config) in self.repos.iter() { if path.starts_with(repo) { - return Ok(config) + return Ok(config); } } Err(ApiError::BadRequest("No such repo".to_string())) } } - -fn load_gpg_key (maybe_gpg_homedir: &Option, maybe_gpg_key: &Option) -> io::Result> { +fn load_gpg_key( + maybe_gpg_homedir: &Option, + maybe_gpg_key: &Option, +) -> io::Result> { match maybe_gpg_key { Some(gpg_key) => { let mut cmd = Command::new("gpg2"); if let Some(gpg_homedir) = maybe_gpg_homedir { cmd.arg(&format!("--homedir={}", gpg_homedir)); } - cmd - .arg("--export") - .arg(gpg_key); + cmd.arg("--export").arg(gpg_key); let output = cmd.output()?; if output.status.success() { @@ -328,93 +329,120 @@ fn load_gpg_key (maybe_gpg_homedir: &Option, maybe_gpg_key: &Option Ok(None), } } - pub fn load_config>(path: P) -> io::Result { let config_contents = std::fs::read_to_string(path)?; - let mut config_data: Config = serde_json::from_str(&config_contents).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + let mut config_data: Config = serde_json::from_str(&config_contents) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; - config_data.build_gpg_key_content = load_gpg_key (&config_data.gpg_homedir, &config_data.build_gpg_key)?; + config_data.build_gpg_key_content = + load_gpg_key(&config_data.gpg_homedir, &config_data.build_gpg_key)?; for (reponame, repoconfig) in &mut config_data.repos { repoconfig.name = reponame.clone(); - repoconfig.gpg_key_content = load_gpg_key (&config_data.gpg_homedir, &config_data.build_gpg_key)?; + repoconfig.gpg_key_content = + load_gpg_key(&config_data.gpg_homedir, &config_data.build_gpg_key)?; } - if config_data.base_url == "" { + if config_data.base_url.is_empty() { config_data.base_url = format!("http://{}:{}", config_data.host, config_data.port) } Ok(config_data) } -fn handle_build_repo(config: Data, - req: HttpRequest) -> Result { +fn handle_build_repo( + config: Data, + req: HttpRequest, +) -> Result { let tail = req.match_info().query("tail"); let id = req.match_info().query("id"); let relpath = canonicalize_path(tail.trim_start_matches('/'))?; let realid = canonicalize_path(id)?; - let path = Path::new(&config.build_repo_base).join(&realid).join(&relpath); + let path = Path::new(&config.build_repo_base) + .join(&realid) + .join(&relpath); if path.is_dir() { return Err(ErrorNotFound("Ignoring directory")); } - NamedFile::open(path).or_else(|_e| { - let fallback_path = Path::new(&config.build_repo_base).join(&id).join("parent").join(&relpath); - if fallback_path.is_dir() { - Err(ErrorNotFound("Ignoring directory")) - } else { - NamedFile::open(fallback_path).map_err(|e| e.into()) - } - })?.respond_to(&req) + NamedFile::open(path) + .or_else(|_e| { + let fallback_path = Path::new(&config.build_repo_base) + .join(&id) + .join("parent") + .join(&relpath); + if fallback_path.is_dir() { + Err(ErrorNotFound("Ignoring directory")) + } else { + NamedFile::open(fallback_path).map_err(|e| e.into()) + } + })? 
+ .respond_to(&req) } -fn get_commit_for_file(path: &PathBuf) -> Option { +fn get_commit_for_file(path: &Path) -> Option { if path.file_name() == Some(OsStr::new("superblock")) { - if let Ok(superblock) = ostree::load_delta_superblock_file (&path) { + if let Ok(superblock) = ostree::load_delta_superblock_file(path) { return Some(superblock.commit); } } if path.extension() == Some(OsStr::new("commit")) { - if let Ok(commit) = ostree::load_commit_file (&path) { + if let Ok(commit) = ostree::load_commit_file(path) { return Some(commit); } } - return None; + None } struct RepoHeadersData { nocache: bool, } -fn apply_extra_headers (resp: &mut actix_web::dev::ServiceResponse){ +fn apply_extra_headers(resp: &mut actix_web::dev::ServiceResponse) { let mut nocache = false; if let Some(data) = resp.request().extensions().get::() { nocache = data.nocache; } if nocache { - resp.headers_mut().insert(CACHE_CONTROL, - HeaderValue::from_static("no-store")); + resp.headers_mut() + .insert(CACHE_CONTROL, HeaderValue::from_static("no-store")); } - } -fn verify_repo_token(req: &HttpRequest, commit: ostree::OstreeCommit, repoconfig: &RepoConfig, path: &PathBuf) -> Result<(), ApiError> { - let token_type = commit.metadata.get("xa.token-type").map(|v| v.as_i32_le().unwrap_or(0)).unwrap_or(repoconfig.default_token_type); - if !repoconfig.require_auth_for_token_types.contains(&token_type) { +fn verify_repo_token( + req: &HttpRequest, + commit: ostree::OstreeCommit, + repoconfig: &RepoConfig, + path: &Path, +) -> Result<(), ApiError> { + let token_type = commit + .metadata + .get("xa.token-type") + .map(|v| v.as_i32_le().unwrap_or(0)) + .unwrap_or(repoconfig.default_token_type); + if !repoconfig + .require_auth_for_token_types + .contains(&token_type) + { return Ok(()); } - req.extensions_mut().insert(RepoHeadersData { - nocache: true, - }); + req.extensions_mut() + .insert(RepoHeadersData { nocache: true }); - let commit_refs = commit.metadata.get("ostree.ref-binding").ok_or (ApiError::InternalServerError(format!("No ref binding for commit {:?}", path)))?.as_string_vec()?; + let commit_refs = commit + .metadata + .get("ostree.ref-binding") + .ok_or_else(|| { + ApiError::InternalServerError(format!("No ref binding for commit {:?}", path)) + })? 
+ .as_string_vec()?; let mut result = Ok(()); // If there are any normal flatpak refs, the token must match at least one: for commit_ref in commit_refs { @@ -429,40 +457,42 @@ fn verify_repo_token(req: &HttpRequest, commit: ostree::OstreeCommit, repoconfig result } -fn handle_repo(config: Data, - req: HttpRequest) -> Result { +fn handle_repo(config: Data, req: HttpRequest) -> Result { let tail = req.match_info().query("tail"); let tailpath = canonicalize_path(tail.trim_start_matches('/'))?; let repoconfig = config.get_repoconfig_from_path(&tailpath)?; let namepath = Path::new(&repoconfig.name); - let relpath = tailpath.strip_prefix(&namepath) + let relpath = tailpath + .strip_prefix(&namepath) .map_err(|e| ApiError::InternalServerError(e.to_string()))?; let path = Path::new(&repoconfig.path).join(&relpath); if path.is_dir() { return Err(ErrorNotFound("Ignoring directory")); } - if let Some(commit) = get_commit_for_file (&path) { + if let Some(commit) = get_commit_for_file(&path) { verify_repo_token(&req, commit, repoconfig, &path)?; } - NamedFile::open(path).or_else(|e| { - // Was this a delta, if so check the deltas queued for deletion - if relpath.starts_with("deltas") { - let tmp_path = Path::new(&repoconfig.path).join("tmp").join(&relpath); - if tmp_path.is_dir() { - Err(ErrorNotFound("Ignoring directory")) + NamedFile::open(path) + .or_else(|e| { + // Was this a delta, if so check the deltas queued for deletion + if relpath.starts_with("deltas") { + let tmp_path = Path::new(&repoconfig.path).join("tmp").join(&relpath); + if tmp_path.is_dir() { + Err(ErrorNotFound("Ignoring directory")) + } else { + NamedFile::open(tmp_path).map_err(|e| e.into()) + } } else { - NamedFile::open(tmp_path).map_err(|e| e.into()) + Err(e).map_err(|e| e.into()) } - } else { - Err(e).map_err(|e| e.into()) - } - })?.respond_to(&req) + })? 
+ .respond_to(&req) } -pub fn create_app ( +pub fn create_app( pool: Pool, config: &Arc, job_queue: Addr, @@ -470,7 +500,11 @@ pub fn create_app ( ) -> Server { let c = config.clone(); let secret = config.secret.clone(); - let repo_secret = config.repo_secret.as_ref().unwrap_or(config.secret.as_ref()).clone(); + let repo_secret = config + .repo_secret + .as_ref() + .unwrap_or_else(|| config.secret.as_ref()) + .clone(); let http_server = HttpServer::new(move || { App::new() .data(job_queue.clone()) @@ -478,75 +512,108 @@ pub fn create_app ( .register_data(Data::new((*c).clone())) .data(Db(pool.clone())) .wrap(Logger::default()) - .wrap(middleware::Compress::new(http::header::ContentEncoding::Identity)) - .service(web::scope("/api/v1") - .wrap(TokenParser::new(&secret)) - .service(web::resource("/token_subset") - .route(web::post().to(api::token_subset))) - .service(web::resource("/job/{id}").name("show_job") - .route(web::get().to_async(api::get_job))) - .service(web::resource("/build") - .route(web::post().to_async(api::create_build)) - .route(web::get().to_async(api::builds))) - .service(web::resource("/build/{id}").name("show_build") - .route(web::get().to_async(api::get_build))) - .service(web::resource("/build/{id}/build_ref") - .route(web::post().to_async(api::create_build_ref))) - .service(web::resource("/build/{id}/build_ref/{ref_id}").name("show_build_ref") - .route(web::get().to_async(api::get_build_ref))) - .service(web::resource("/build/{id}/missing_objects") - .data(web::JsonConfig::default().limit(1024*1024*10)) - .route(web::get().to(api::missing_objects))) - .service(web::resource("/build/{id}/add_extra_ids") - .route(web::post().to_async(api::add_extra_ids))) - .service(web::resource("/build/{id}/upload") - .route(web::post().to_async(api::upload))) - .service(web::resource("/build/{id}/commit").name("show_commit_job") - .route(web::post().to_async(api::commit)) - .route(web::get().to_async(api::get_commit_job))) - .service(web::resource("/build/{id}/publish").name("show_publish_job") - .route(web::post().to_async(api::publish)) - .route(web::get().to_async(api::get_publish_job))) - .service(web::resource("/build/{id}/purge") - .route(web::post().to_async(api::purge))) - .service(web::resource("/delta/worker") - .route(web::get().to(api::ws_delta))) - .service(web::resource("/delta/upload/{repo}") - .route(web::post().to_async(api::delta_upload))) + .wrap(middleware::Compress::new( + http::header::ContentEncoding::Identity, + )) + .service( + web::scope("/api/v1") + .wrap(TokenParser::new(&secret)) + .service( + web::resource("/token_subset").route(web::post().to(api::token_subset)), + ) + .service( + web::resource("/job/{id}") + .name("show_job") + .route(web::get().to_async(api::get_job)), + ) + .service( + web::resource("/build") + .route(web::post().to_async(api::create_build)) + .route(web::get().to_async(api::builds)), + ) + .service( + web::resource("/build/{id}") + .name("show_build") + .route(web::get().to_async(api::get_build)), + ) + .service( + web::resource("/build/{id}/build_ref") + .route(web::post().to_async(api::create_build_ref)), + ) + .service( + web::resource("/build/{id}/build_ref/{ref_id}") + .name("show_build_ref") + .route(web::get().to_async(api::get_build_ref)), + ) + .service( + web::resource("/build/{id}/missing_objects") + .data(web::JsonConfig::default().limit(1024 * 1024 * 10)) + .route(web::get().to(api::missing_objects)), + ) + .service( + web::resource("/build/{id}/add_extra_ids") + .route(web::post().to_async(api::add_extra_ids)), + ) + 
.service( + web::resource("/build/{id}/upload") + .route(web::post().to_async(api::upload)), + ) + .service( + web::resource("/build/{id}/commit") + .name("show_commit_job") + .route(web::post().to_async(api::commit)) + .route(web::get().to_async(api::get_commit_job)), + ) + .service( + web::resource("/build/{id}/publish") + .name("show_publish_job") + .route(web::post().to_async(api::publish)) + .route(web::get().to_async(api::get_publish_job)), + ) + .service( + web::resource("/build/{id}/purge").route(web::post().to_async(api::purge)), + ) + .service(web::resource("/delta/worker").route(web::get().to(api::ws_delta))) + .service( + web::resource("/delta/upload/{repo}") + .route(web::post().to_async(api::delta_upload)), + ), + ) + .service( + web::scope("/repo") + .wrap(TokenParser::optional(&repo_secret)) + .wrap_fn(|req, srv| { + srv.call(req).map(|mut resp| { + apply_extra_headers(&mut resp); + resp + }) + }) + .service( + web::resource("/{tail:.*}") + .name("repo") + .route(web::get().to(handle_repo)) + .route(web::head().to(handle_repo)) + .to(HttpResponse::MethodNotAllowed), + ), ) - .service(web::scope("/repo") - .wrap(TokenParser::optional(&repo_secret)) - .wrap_fn(|req, srv| { - srv.call(req).map(|mut resp| { - apply_extra_headers (&mut resp); - resp - }) - }) - .service(web::resource("/{tail:.*}").name("repo") - .route(web::get().to(handle_repo)) - .route(web::head().to(handle_repo)) - .to(HttpResponse::MethodNotAllowed) - )) - .service(web::resource("/build-repo/{id}/{tail:.*}") - .route(web::get().to(handle_build_repo)) - .route(web::head().to(handle_build_repo)) - .to(HttpResponse::MethodNotAllowed) + .service( + web::resource("/build-repo/{id}/{tail:.*}") + .route(web::get().to(handle_build_repo)) + .route(web::head().to(handle_build_repo)) + .to(HttpResponse::MethodNotAllowed), ) - .service(web::resource("/status") - .route(web::get().to_async(api::status))) - .service(web::resource("/status/{id}") - .route(web::get().to_async(api::job_status))) + .service(web::resource("/status").route(web::get().to_async(api::status))) + .service(web::resource("/status/{id}").route(web::get().to_async(api::job_status))) }); let bind_to = format!("{}:{}", config.host, config.port); - let server = - http_server + let server = http_server .bind(&bind_to) .unwrap() .disable_signals() .start(); - info!("Started http server: {}", bind_to); + log::info!("Started http server: {}", bind_to); server } diff --git a/src/bin/delta-generator-client.rs b/src/bin/delta-generator-client.rs index e902a3a..c0025bf 100644 --- a/src/bin/delta-generator-client.rs +++ b/src/bin/delta-generator-client.rs @@ -1,48 +1,30 @@ -extern crate flatmanager; -extern crate actix; -extern crate actix_web; -extern crate actix_http; -extern crate actix_web_actors; -extern crate actix_codec; -extern crate awc; -extern crate dotenv; -extern crate env_logger; -extern crate futures; -#[macro_use] -extern crate log; -#[macro_use] -extern crate serde_json; -extern crate num_cpus; -extern crate mpart_async; -extern crate tokio; -extern crate futures_fs; -extern crate futures_locks; - +use actix::io::{SinkWrite, WriteHandler}; use actix::*; use actix_codec::Framed; -use actix_web::http::header; use actix_web::http; +use actix_web::http::header; use awc::{ error::WsProtocolError, ws::{Codec, Frame, Message}, Client, }; -use actix::io::{SinkWrite, WriteHandler}; use dotenv::dotenv; -use std::path::{Path,PathBuf}; +use futures::stream::SplitSink; +use futures::{Future, Stream}; +use futures_fs::FsPool; +use log::{error, info, warn}; 
+use mpart_async::MultipartRequest; +use serde_json::json; use std::env; use std::fs; use std::io; use std::io::Write; -use std::time::{Instant, Duration}; -use mpart_async::MultipartRequest; -use futures::{Stream, Future}; -use futures::stream::SplitSink; -use futures_fs::FsPool; +use std::path::{Path, PathBuf}; +use std::time::{Duration, Instant}; -use flatmanager::{RemoteClientMessage,RemoteServerMessage}; -use flatmanager::ostree; use flatmanager::errors::DeltaGenerationError; +use flatmanager::ostree; +use flatmanager::{RemoteClientMessage, RemoteServerMessage}; const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(30); const SERVER_TIMEOUT: Duration = Duration::from_secs(60); @@ -53,22 +35,26 @@ const UPLOAD_BUFFER_CAPACITY_BYTES: usize = 512 * 1024; const UPLOAD_TIMEOUT: Duration = Duration::from_secs(10 * 60); // Prune once a day -const PRUNE_INTERVAL: Duration = Duration::from_secs(60*60*24); - -fn init_ostree_repo(repo_path: &PathBuf) -> io::Result<()> { - for &d in ["extensions", - "objects", - "refs/heads", - "refs/mirrors", - "refs/remotes", - "state", - "tmp/cache"].iter() { +const PRUNE_INTERVAL: Duration = Duration::from_secs(60 * 60 * 24); + +fn init_ostree_repo(repo_path: &Path) -> io::Result<()> { + for &d in [ + "extensions", + "objects", + "refs/heads", + "refs/mirrors", + "refs/remotes", + "state", + "tmp/cache", + ] + .iter() + { fs::create_dir_all(repo_path.join(d))?; } let mut file = fs::File::create(repo_path.join("config"))?; - file.write_all(format!( -r#"[core] + file.write_all( + r#"[core] repo_version=1 mode=bare-user # We use one single upstream remote which we pass the url to manually each tim @@ -76,7 +62,9 @@ mode=bare-user url= gpg-verify=false gpg-verify-summary=false -"#).as_bytes())?; +"# + .as_bytes(), + )?; Ok(()) } @@ -91,7 +79,10 @@ struct Manager { impl Manager { fn retry(&mut self, ctx: &mut Context) { - info!("Retrying connect in {} seconds", CONNECTION_RETRY_DELAY.as_secs()); + info!( + "Retrying connect in {} seconds", + CONNECTION_RETRY_DELAY.as_secs() + ); ctx.run_later(CONNECTION_RETRY_DELAY, move |manager, ctx| { manager.connect(ctx); }); @@ -108,7 +99,6 @@ impl Manager { .map_err(|e, manager, ctx| { error!("Error connecting: {}", e); manager.retry(ctx); - () }) .map(|(_response, framed), manager, ctx| { info!("Connected"); @@ -122,18 +112,19 @@ impl Manager { manager.client = Some(DeltaClient::create(move |ctx| { DeltaClient::add_stream(stream, ctx); DeltaClient { - fs_pool: fs_pool, + fs_pool, writer: SinkWrite::new(sink, ctx), manager: addr, - url: url, - token: token, - capacity: capacity, - repo: repo, + url, + token, + capacity, + repo, repo_lock: futures_locks::RwLock::new(1), last_recieved_pong: Instant::now(), } })) - })); + }), + ); } } @@ -161,8 +152,7 @@ impl Handler for Manager { } } -struct DeltaClient -{ +struct DeltaClient { fs_pool: FsPool, writer: SinkWrite>>, manager: Addr, @@ -190,25 +180,28 @@ impl Actor for DeltaClient { } } -fn pull_and_generate_delta_async(repo_path: &PathBuf, - url: &String, - delta: &ostree::Delta) -> Box> { - let url = url.clone(); - let repo_path2 = repo_path.clone(); +fn pull_and_generate_delta_async( + repo_path: &Path, + url: &str, + delta: &ostree::Delta, +) -> Box> { + let url = url.to_string(); + let repo_path2 = repo_path.to_path_buf(); let delta_clone = delta.clone(); Box::new( // We do 5 retries, because pull is sometimes not super stable - ostree::pull_delta_async(5, &repo_path, &url, &delta_clone) + ostree::pull_delta_async(5, repo_path, &url, &delta_clone) .and_then(move |_| 
ostree::generate_delta_async(&repo_path2, &delta_clone)) - .from_err() - ) + .from_err(), + ) } -fn add_delta_parts(fs_pool: &FsPool, - repo_path: &PathBuf, - delta: &ostree::Delta, - mpart: &mut MultipartRequest) -> Result<(),DeltaGenerationError> -{ +fn add_delta_parts( + fs_pool: &FsPool, + repo_path: &Path, + delta: &ostree::Delta, + mpart: &mut MultipartRequest, +) -> Result<(), DeltaGenerationError> { let delta_path = delta.delta_path(repo_path)?; for entry in fs::read_dir(delta_path)? { @@ -216,53 +209,74 @@ fn add_delta_parts(fs_pool: &FsPool, let path = entry.path(); if path.is_file() { let filename = path.file_name().unwrap(); - let deltafilename = format!("{}.{}.delta", delta.to_name().unwrap(), filename.to_string_lossy()); - mpart.add_stream("content", &deltafilename,"application/octet-stream", - fs_pool.read(path.clone(), futures_fs::ReadOptions::default().buffer_size(UPLOAD_BUFFER_CAPACITY_BYTES))) + let deltafilename = format!( + "{}.{}.delta", + delta.to_name().unwrap(), + filename.to_string_lossy() + ); + mpart.add_stream( + "content", + &deltafilename, + "application/octet-stream", + fs_pool.read( + path.clone(), + futures_fs::ReadOptions::default().buffer_size(UPLOAD_BUFFER_CAPACITY_BYTES), + ), + ) } } Ok(()) } - -pub fn upload_delta(fs_pool: &FsPool, - base_url: &String, - token: &String, - repo: &String, - repo_path: &PathBuf, - delta: &ostree::Delta) -> impl Future { +pub fn upload_delta( + fs_pool: &FsPool, + base_url: &str, + token: &str, + repo: &str, + repo_path: &Path, + delta: &ostree::Delta, +) -> impl Future { let url = format!("{}/api/v1/delta/upload/{}", base_url, repo); - let token = token.clone(); + let token = token.to_string(); let mut mpart = MultipartRequest::default(); - info!("Uploading delta {}", delta.to_name().unwrap_or("??".to_string())); - futures::done(add_delta_parts(fs_pool, repo_path, delta, &mut mpart)) - .and_then( move |_| { - Client::new() - .post(&url) - .header(header::CONTENT_TYPE, format!("multipart/form-data; boundary={}", mpart.get_boundary())) - .header(header::AUTHORIZATION, format!("Bearer {}", token)) - .timeout(UPLOAD_TIMEOUT) - .method(http::Method::POST) - .send_body(actix_http::body::BodyStream::new(mpart)) - .then(|r| { - match r { - Ok(response) => { - if response.status().is_success() { - Ok(()) - } else { - error!("Unexpected upload response: {:?}", response); - Err(DeltaGenerationError::new(&format!("Delta upload failed with error {}", response.status()))) - } - }, - Err(e) => { - error!("Unexpected upload error: {:?}", e); - Err(DeltaGenerationError::new(&format!("Delta upload failed with error {}", e))) - }, + info!( + "Uploading delta {}", + delta.to_name().unwrap_or_else(|_| "??".to_string()) + ); + futures::done(add_delta_parts(fs_pool, repo_path, delta, &mut mpart)).and_then(move |_| { + Client::new() + .post(&url) + .header( + header::CONTENT_TYPE, + format!("multipart/form-data; boundary={}", mpart.get_boundary()), + ) + .header(header::AUTHORIZATION, format!("Bearer {}", token)) + .timeout(UPLOAD_TIMEOUT) + .method(http::Method::POST) + .send_body(actix_http::body::BodyStream::new(mpart)) + .then(|r| match r { + Ok(response) => { + if response.status().is_success() { + Ok(()) + } else { + error!("Unexpected upload response: {:?}", response); + Err(DeltaGenerationError::new(&format!( + "Delta upload failed with error {}", + response.status() + ))) } - }) - }) + } + Err(e) => { + error!("Unexpected upload error: {:?}", e); + Err(DeltaGenerationError::new(&format!( + "Delta upload failed with error {}", + e + 
))) + } + }) + }) } impl DeltaClient { @@ -273,7 +287,7 @@ impl DeltaClient { warn!("Failed to ping server: {:?}, disconnecting", e); client.close(); ctx.stop(); - return + return; } if Instant::now().duration_since(client.last_recieved_pong) > SERVER_TIMEOUT { warn!("Server heartbeat missing, disconnecting!"); @@ -284,23 +298,21 @@ impl DeltaClient { } fn register(&mut self) { - if let Err(e) = self.writer.write(Message::Text(json!( - RemoteClientMessage::Register { + if let Err(e) = self.writer.write(Message::Text( + json!(RemoteClientMessage::Register { capacity: self.capacity, - } - ).to_string())) { + }) + .to_string(), + )) { warn!("Failed to register with server: {:?}", e); } } fn finished(&mut self, id: u32, errmsg: Option) { info!("Sending finished for request {}", id); - if let Err(e) = self.writer.write(Message::Text(json!( - RemoteClientMessage::Finished { - id: id, - errmsg: errmsg.clone(), - } - ).to_string())) { + if let Err(e) = self.writer.write(Message::Text( + json!(RemoteClientMessage::Finished { id, errmsg }).to_string(), + )) { warn!("Failed to call finished: {:?}", e); } } @@ -311,7 +323,8 @@ impl DeltaClient { ctx.spawn( // We take a write lock across the entire operation to // block out all delta request handling during the prune - self.repo_lock.write() + self.repo_lock + .write() .into_actor(self) .then(move |guard, client, _ctx| { info!("Pruning repo"); @@ -322,10 +335,10 @@ impl DeltaClient { Err(e) => error!("Failed to prune repo: {}", e.to_string()), _ => info!("Pruning repo done"), }; - &guard; + let _ = &guard; actix::fut::ok(()) }) - }) + }), ); } @@ -334,37 +347,41 @@ impl DeltaClient { ctx.run_interval(PRUNE_INTERVAL, |client, ctx| client.prune_repo(ctx)); } - fn msg_request_delta(&mut self, - id: u32, - url: String, - repo: String, - delta: ostree::Delta, - ctx: &mut Context) - { + fn msg_request_delta( + &mut self, + id: u32, + url: String, + repo: String, + delta: ostree::Delta, + ctx: &mut Context, + ) { info!("Got delta request {}: {} {}", id, repo, delta.to_string()); let path = self.repo.clone(); let path2 = self.repo.clone(); let delta2 = delta.clone(); let base_url = self.url.clone(); let token = self.token.clone(); - let reponame = repo.clone(); + let reponame = repo; let fs_pool = self.fs_pool.clone(); ctx.spawn( // We take a read lock across the entire operation to // protect against the regular GC:ing the repo, which // takes a write lock - self.repo_lock.read() + self.repo_lock + .read() .into_actor(self) .then(move |guard, client, _ctx| { pull_and_generate_delta_async(&path, &url, &delta) - .and_then(move |_| upload_delta(&fs_pool, &base_url, &token, &reponame, &path2, &delta2)) + .and_then(move |_| { + upload_delta(&fs_pool, &base_url, &token, &reponame, &path2, &delta2) + }) .into_actor(client) .then(move |r, client, _ctx| { - &guard; + let _ = &guard; client.finished(id, r.err().map(|e| e.to_string())); actix::fut::ok(()) }) - }) + }), ); } @@ -374,7 +391,12 @@ impl DeltaClient { fn message(&mut self, message: RemoteServerMessage, ctx: &mut Context) { match message { - RemoteServerMessage::RequestDelta { id, url, repo, delta } => self.msg_request_delta(id, url, repo, delta, ctx), + RemoteServerMessage::RequestDelta { + id, + url, + repo, + delta, + } => self.msg_request_delta(id, url, repo, delta, ctx), } } } @@ -382,24 +404,21 @@ impl DeltaClient { impl StreamHandler for DeltaClient { fn handle(&mut self, msg: Frame, ctx: &mut Context) { match msg { - Frame::Text(Some(bytes)) => { - match std::str::from_utf8(&bytes) { - Ok(text) => 
match serde_json::from_str::(text) { - Ok(message) => self.message(message, ctx), - Err(e) => error!("Got invalid websocket message: {}", e), - }, + Frame::Text(Some(bytes)) => match std::str::from_utf8(&bytes) { + Ok(text) => match serde_json::from_str::(text) { + Ok(message) => self.message(message, ctx), Err(e) => error!("Got invalid websocket message: {}", e), - } + }, + Err(e) => error!("Got invalid websocket message: {}", e), }, Frame::Pong(_) => { self.last_recieved_pong = Instant::now(); - }, + } _ => (), } } - fn started(&mut self, _ctx: &mut Context) { - } + fn started(&mut self, _ctx: &mut Context) {} fn finished(&mut self, ctx: &mut Context) { // websocket got closed @@ -408,24 +427,22 @@ impl StreamHandler for DeltaClient { } } -impl WriteHandler for DeltaClient -{ -} +impl WriteHandler for DeltaClient {} fn main() { env::set_var("RUST_LOG", "info"); let _ = env_logger::init(); let sys = actix::System::new("delta-generator-client"); - let cwd = PathBuf::from(std::env::current_dir().expect("Can't get cwd")); + let cwd = std::env::current_dir().expect("Can't get cwd"); dotenv().ok(); let token = env::var("REPO_TOKEN").expect("No token, set REPO_TOKEN in env or .env"); - let url = env::var("MANAGER_URL").unwrap_or ("http://127.0.0.1:8080".to_string()); + let url = env::var("MANAGER_URL").unwrap_or_else(|_| "http://127.0.0.1:8080".to_string()); let capacity: u32 = env::var("CAPACITY") .map(|s| s.parse().expect("Failed to parse $CAPACITY")) - .unwrap_or (num_cpus::get() as u32); + .unwrap_or_else(|_| num_cpus::get() as u32); let workdir = env::var("WORKDIR") .map(|s| cwd.join(Path::new(&s))) .unwrap_or_else(|_e| cwd.clone()); @@ -438,12 +455,13 @@ fn main() { let _addr = Manager { fs_pool: FsPool::default(), - url: url, - token: token, + url, + token, client: None, - capacity: capacity, + capacity, repo: repodir, - }.start(); + } + .start(); let r = sys.run(); info!("System run returned {:?}", r); diff --git a/src/bin/flat-manager.rs b/src/bin/flat-manager.rs index b3b3fba..a786040 100644 --- a/src/bin/flat-manager.rs +++ b/src/bin/flat-manager.rs @@ -1,7 +1,3 @@ -extern crate flatmanager; -extern crate dotenv; -extern crate env_logger; - use dotenv::dotenv; use std::env; use std::path::PathBuf; @@ -15,7 +11,8 @@ fn main() { dotenv().ok(); - let config_path = PathBuf::from(env::var("REPO_CONFIG").unwrap_or ("config.json".to_string())); + let config_path = + PathBuf::from(env::var("REPO_CONFIG").unwrap_or_else(|_| "config.json".to_string())); let config = flatmanager::load_config(&config_path); diff --git a/src/bin/gentoken.rs b/src/bin/gentoken.rs index 485b3ec..ef415b1 100644 --- a/src/bin/gentoken.rs +++ b/src/bin/gentoken.rs @@ -1,20 +1,12 @@ -extern crate jsonwebtoken as jwt; -#[macro_use] -extern crate serde_derive; -extern crate base64; -extern crate chrono; -extern crate argparse; -extern crate serde; -extern crate serde_json; - +use chrono::{Duration, Utc}; +use jwt::{encode, EncodingKey, Header}; +use std::fs; use std::io; use std::io::prelude::*; -use std::fs; use std::process; -use jwt::{encode, Header, EncodingKey}; -use chrono::{Utc, Duration}; -use argparse::{ArgumentParser, StoreTrue, Store, StoreOption, List}; +use argparse::{ArgumentParser, List, Store, StoreOption, StoreTrue}; +use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize)] struct Claims { @@ -53,54 +45,64 @@ fn main() { let mut ap = ArgumentParser::new(); ap.set_description("Generate token for flat-manager."); ap.refer(&mut verbose) - .add_option(&["-v", "--verbose"], StoreTrue, - 
"Be verbose"); + .add_option(&["-v", "--verbose"], StoreTrue, "Be verbose"); ap.refer(&mut name) - .add_option(&["--name"], Store, - "Name for the token"); + .add_option(&["--name"], Store, "Name for the token"); ap.refer(&mut sub) - .add_option(&["--sub"], Store, - "Subject (default: build)"); - ap.refer(&mut scope) - .add_option(&["--scope"], List, - "Add scope (default if none: [build, upload, publish, jobs]"); - ap.refer(&mut prefixes) - .add_option(&["--prefix"], List, - "Add ref prefix (default if none: ['']"); + .add_option(&["--sub"], Store, "Subject (default: build)"); + ap.refer(&mut scope).add_option( + &["--scope"], + List, + "Add scope (default if none: [build, upload, publish, jobs]", + ); + ap.refer(&mut prefixes).add_option( + &["--prefix"], + List, + "Add ref prefix (default if none: ['']", + ); ap.refer(&mut repos) - .add_option(&["--repo"], List, - "Add repo (default if none: ['']"); + .add_option(&["--repo"], List, "Add repo (default if none: ['']"); ap.refer(&mut base64) - .add_option(&["--base64"], StoreTrue, - "The secret is base64 encoded"); - ap.refer(&mut secret) - .add_option(&["--secret"], StoreOption, - "Secret used to encode the token"); - ap.refer(&mut secret_file) - .add_option(&["--secret-file"], StoreOption, - "Load secret from file (or - for stdin)"); - ap.refer(&mut duration) - .add_option(&["--duration"], Store, - "Duration for key in seconds (default 1 year)"); + .add_option(&["--base64"], StoreTrue, "The secret is base64 encoded"); + ap.refer(&mut secret).add_option( + &["--secret"], + StoreOption, + "Secret used to encode the token", + ); + ap.refer(&mut secret_file).add_option( + &["--secret-file"], + StoreOption, + "Load secret from file (or - for stdin)", + ); + ap.refer(&mut duration).add_option( + &["--duration"], + Store, + "Duration for key in seconds (default 1 year)", + ); ap.parse_args_or_exit(); } let secret_contents; - if scope.len() == 0 { - scope = vec!["build".to_string(), "upload".to_string(), "publish".to_string(), "jobs".to_string()]; + if scope.is_empty() { + scope = vec![ + "build".to_string(), + "upload".to_string(), + "publish".to_string(), + "jobs".to_string(), + ]; } - if prefixes.len() == 0 { + if prefixes.is_empty() { prefixes = vec!["".to_string()]; } - if repos.len() == 0 { + if repos.is_empty() { repos = vec!["".to_string()]; } if let Some(s) = secret { - secret_contents = s.clone(); + secret_contents = s; } else if let Some(filename) = secret_file { match read_secret(filename) { Ok(contents) => secret_contents = contents, @@ -121,10 +123,10 @@ fn main() { }; let claims = Claims { - sub: sub, - scope: scope, - prefixes: prefixes, - repos: repos, + sub, + scope, + prefixes, + repos, name: name.clone(), exp: Utc::now().timestamp() + duration, }; diff --git a/src/db.rs b/src/db.rs index 9cab56a..932a3a0 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,181 +1,265 @@ use actix::prelude::*; use actix_web::*; -use diesel; use diesel::prelude::*; +use serde_json::json; -use models::*; -use errors::ApiError; -use schema; -use Pool; +use crate::errors::ApiError; +use crate::models::*; +use crate::schema; +use crate::Pool; pub struct Db(pub Pool); impl Db { - fn run(self: &Self, func: Func) -> impl Future - where Func: FnOnce(&r2d2::PooledConnection>) -> Result, - Func: Send + 'static, - T: Send + 'static, + fn run(&self, func: Func) -> impl Future + where + Func: FnOnce( + &r2d2::PooledConnection>, + ) -> Result, + Func: Send + 'static, + T: Send + 'static, { let p = self.0.clone(); web::block(move || { let conn = p.get()?; func(&conn) 
}) - .map_err(|e| ApiError::from(e)) + .map_err(ApiError::from) } - fn run_in_transaction(self: &Self, func: Func) -> impl Future - where Func: FnOnce(&r2d2::PooledConnection>) -> Result, - Func: Send + 'static, - T: Send + 'static, + fn run_in_transaction(&self, func: Func) -> impl Future + where + Func: FnOnce( + &r2d2::PooledConnection>, + ) -> Result, + Func: Send + 'static, + T: Send + 'static, { - self.run(move |conn| { - conn.transaction::(|| func(conn)) - }) + self.run(move |conn| conn.transaction::(|| func(conn))) } /* Jobs */ - pub fn lookup_job(self: &Self, - job_id: i32, - log_offset: Option) -> impl Future { + pub fn lookup_job( + &self, + job_id: i32, + log_offset: Option, + ) -> impl Future { self.run(move |conn| { use schema::jobs::dsl::*; Ok(jobs - .filter(id.eq(job_id)) - .get_result::(conn)? - .apply_log_offset (log_offset)) + .filter(id.eq(job_id)) + .get_result::(conn)? + .apply_log_offset(log_offset)) }) } - pub fn list_active_jobs(self: &Self) -> impl Future, Error = ApiError> { + pub fn list_active_jobs(&self) -> impl Future, Error = ApiError> { self.run(move |conn| { use schema::jobs::dsl::*; Ok(jobs - .order(id) - .filter(status.le (JobStatus::Started as i16)) - .get_results::(conn)?) - }) + .order(id) + .filter(status.le(JobStatus::Started as i16)) + .get_results::(conn)?) + }) } - pub fn lookup_commit_job(self: &Self, - build_id: i32, - log_offset: Option) -> impl Future { + pub fn lookup_commit_job( + &self, + build_id: i32, + log_offset: Option, + ) -> impl Future { self.run(move |conn| { - use schema::jobs::dsl::*; use schema::builds::dsl::*; + use schema::jobs::dsl::*; Ok(jobs - .inner_join(builds.on(commit_job_id.eq(schema::jobs::dsl::id.nullable()))) - .select(schema::jobs::all_columns) - .filter(schema::builds::dsl::id.eq(build_id)) - .get_result::(conn)? - .apply_log_offset (log_offset)) + .inner_join(builds.on(commit_job_id.eq(schema::jobs::dsl::id.nullable()))) + .select(schema::jobs::all_columns) + .filter(schema::builds::dsl::id.eq(build_id)) + .get_result::(conn)? + .apply_log_offset(log_offset)) }) } - pub fn lookup_publish_job(self: &Self, - build_id: i32, - log_offset: Option) -> impl Future { + pub fn lookup_publish_job( + &self, + build_id: i32, + log_offset: Option, + ) -> impl Future { self.run(move |conn| { - use schema::jobs::dsl::*; use schema::builds::dsl::*; + use schema::jobs::dsl::*; Ok(jobs - .inner_join(builds.on(publish_job_id.eq(schema::jobs::dsl::id.nullable()))) - .select(schema::jobs::all_columns) - .filter(schema::builds::dsl::id.eq(build_id)) - .get_result::(conn)? - .apply_log_offset (log_offset)) + .inner_join(builds.on(publish_job_id.eq(schema::jobs::dsl::id.nullable()))) + .select(schema::jobs::all_columns) + .filter(schema::builds::dsl::id.eq(build_id)) + .get_result::(conn)? 
+ .apply_log_offset(log_offset)) }) } - pub fn start_commit_job(self: &Self, - build_id: i32, - endoflife: Option, - endoflife_rebase: Option, - token_type: Option) -> impl Future { + pub fn start_commit_job( + &self, + build_id: i32, + endoflife: Option, + endoflife_rebase: Option, + token_type: Option, + ) -> impl Future { self.run_in_transaction(move |conn| { let current_build = schema::builds::table .filter(schema::builds::id.eq(build_id)) .get_result::(conn)?; - let current_repo_state = RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); + let current_repo_state = + RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); match current_repo_state { RepoState::Uploading => (), - RepoState::Verifying => return Err(ApiError::WrongRepoState(format!("Build is currently being commited"), "uploading".to_string(), "verifying".to_string())), - RepoState::Ready => return Err(ApiError::WrongRepoState(format!("Build is already commited"), "uploading".to_string(), "ready".to_string())), - RepoState::Failed(s) => return Err(ApiError::WrongRepoState(format!("Commit already failed: {}", s), "uploading".to_string(), "failed".to_string())), - RepoState::Purging | - RepoState::Purged => return Err(ApiError::WrongRepoState("Build has been purged".to_string(), "uploading".to_string(), "purged".to_string())), + RepoState::Verifying => { + return Err(ApiError::WrongRepoState( + "Build is currently being commited".to_string(), + "uploading".to_string(), + "verifying".to_string(), + )) + } + RepoState::Ready => { + return Err(ApiError::WrongRepoState( + "Build is already commited".to_string(), + "uploading".to_string(), + "ready".to_string(), + )) + } + RepoState::Failed(s) => { + return Err(ApiError::WrongRepoState( + format!("Commit already failed: {}", s), + "uploading".to_string(), + "failed".to_string(), + )) + } + RepoState::Purging | RepoState::Purged => { + return Err(ApiError::WrongRepoState( + "Build has been purged".to_string(), + "uploading".to_string(), + "purged".to_string(), + )) + } } let (val, reason) = RepoState::to_db(&RepoState::Verifying); - let job = - diesel::insert_into(schema::jobs::table) + let job = diesel::insert_into(schema::jobs::table) .values(NewJob { kind: JobKind::Commit.to_db(), start_after: None, repo: None, contents: json!(CommitJob { build: build_id, - endoflife: endoflife, - endoflife_rebase: endoflife_rebase, - token_type: token_type, - }).to_string(), + endoflife, + endoflife_rebase, + token_type, + }) + .to_string(), }) .get_result::(conn)?; diesel::update(schema::builds::table) .filter(schema::builds::id.eq(build_id)) - .set((schema::builds::commit_job_id.eq(job.id), - schema::builds::repo_state.eq(val), - schema::builds::repo_state_reason.eq(reason))) + .set(( + schema::builds::commit_job_id.eq(job.id), + schema::builds::repo_state.eq(val), + schema::builds::repo_state_reason.eq(reason), + )) .get_result::(conn)?; Ok(job) }) } - pub fn start_publish_job(self: &Self, - build_id: i32, - repo: String) -> impl Future { + pub fn start_publish_job( + &self, + build_id: i32, + repo: String, + ) -> impl Future { self.run_in_transaction(move |conn| { let current_build = schema::builds::table .filter(schema::builds::id.eq(build_id)) .get_result::(conn)?; - let current_published_state = PublishedState::from_db(current_build.published_state, ¤t_build.published_state_reason); + let current_published_state = PublishedState::from_db( + current_build.published_state, + ¤t_build.published_state_reason, + ); match current_published_state { 
PublishedState::Unpublished => (), - PublishedState::Publishing => return Err(ApiError::WrongPublishedState("Build is currently being published".to_string(), "unpublished".to_string(), "publishing".to_string())), - PublishedState::Published => return Err(ApiError::WrongPublishedState("Build has already been published".to_string(), "unpublished".to_string(), "published".to_string())), - PublishedState::Failed(s) => return Err(ApiError::WrongPublishedState(format!("Previous publish failed: {}", s), "unpublished".to_string(), "failed".to_string())), + PublishedState::Publishing => { + return Err(ApiError::WrongPublishedState( + "Build is currently being published".to_string(), + "unpublished".to_string(), + "publishing".to_string(), + )) + } + PublishedState::Published => { + return Err(ApiError::WrongPublishedState( + "Build has already been published".to_string(), + "unpublished".to_string(), + "published".to_string(), + )) + } + PublishedState::Failed(s) => { + return Err(ApiError::WrongPublishedState( + format!("Previous publish failed: {}", s), + "unpublished".to_string(), + "failed".to_string(), + )) + } } - let current_repo_state = RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); + let current_repo_state = + RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); match current_repo_state { - RepoState::Uploading => return Err(ApiError::WrongRepoState("Build is not commited".to_string(), "ready".to_string(), "uploading".to_string())), - RepoState::Verifying => return Err(ApiError::WrongRepoState("Build is not commited".to_string(), "ready".to_string(), "verifying".to_string())), + RepoState::Uploading => { + return Err(ApiError::WrongRepoState( + "Build is not commited".to_string(), + "ready".to_string(), + "uploading".to_string(), + )) + } + RepoState::Verifying => { + return Err(ApiError::WrongRepoState( + "Build is not commited".to_string(), + "ready".to_string(), + "verifying".to_string(), + )) + } RepoState::Ready => (), - RepoState::Failed(s) => return Err(ApiError::WrongRepoState(format!("Build failed: {}", s), "ready".to_string(), "failed".to_string())), - RepoState::Purging | - RepoState::Purged => return Err(ApiError::WrongRepoState("Build has been purged".to_string(), "ready".to_string(), "purged".to_string())), + RepoState::Failed(s) => { + return Err(ApiError::WrongRepoState( + format!("Build failed: {}", s), + "ready".to_string(), + "failed".to_string(), + )) + } + RepoState::Purging | RepoState::Purged => { + return Err(ApiError::WrongRepoState( + "Build has been purged".to_string(), + "ready".to_string(), + "purged".to_string(), + )) + } } let (val, reason) = PublishedState::to_db(&PublishedState::Publishing); - let job = - diesel::insert_into(schema::jobs::table) + let job = diesel::insert_into(schema::jobs::table) .values(NewJob { kind: JobKind::Publish.to_db(), start_after: None, repo: Some(repo), - contents: json!(PublishJob { - build: build_id, - }).to_string(), + contents: json!(PublishJob { build: build_id }).to_string(), }) .get_result::(conn)?; diesel::update(schema::builds::table) .filter(schema::builds::id.eq(build_id)) - .set((schema::builds::publish_job_id.eq(job.id), - schema::builds::published_state.eq(val), - schema::builds::published_state_reason.eq(reason))) + .set(( + schema::builds::publish_job_id.eq(job.id), + schema::builds::published_state.eq(val), + schema::builds::published_state_reason.eq(reason), + )) .get_result::(conn)?; Ok(job) }) @@ -183,103 +267,108 @@ impl Db { /* Builds */ - pub fn 
new_build(self: &Self, a_build: NewBuild) -> impl Future { + pub fn new_build(&self, a_build: NewBuild) -> impl Future { self.run(move |conn| { use schema::builds::dsl::*; Ok(diesel::insert_into(builds) - .values(&a_build) - .get_result::(conn)?) + .values(&a_build) + .get_result::(conn)?) }) } - pub fn lookup_build(self: &Self, - build_id: i32) -> impl Future { + pub fn lookup_build(&self, build_id: i32) -> impl Future { self.run(move |conn| { use schema::builds::dsl::*; - Ok(builds - .filter(id.eq(build_id)) - .get_result::(conn)?) + Ok(builds.filter(id.eq(build_id)).get_result::(conn)?) }) } - pub fn list_builds(self: &Self) -> impl Future, Error = ApiError> { + pub fn list_builds(&self) -> impl Future, Error = ApiError> { self.run(move |conn| { use schema::builds::dsl::*; let (val, _) = RepoState::Purged.to_db(); Ok(builds - .filter(repo_state.ne(val)) - .get_results::(conn)?) + .filter(repo_state.ne(val)) + .get_results::(conn)?) }) } - pub fn add_extra_ids(self: &Self, - build_id: i32, - ids: Vec) -> impl Future { + pub fn add_extra_ids( + &self, + build_id: i32, + ids: Vec, + ) -> impl Future { self.run_in_transaction(move |conn| { let current_build = schema::builds::table .filter(schema::builds::id.eq(build_id)) .get_result::(conn)?; - let mut new_ids = current_build.extra_ids.clone(); + let mut new_ids = current_build.extra_ids; for new_id in ids.iter() { if !new_ids.contains(new_id) { new_ids.push(new_id.to_string()) } } Ok(diesel::update(schema::builds::table) - .filter(schema::builds::id.eq(build_id)) - .set(schema::builds::extra_ids.eq(new_ids)) - .get_result::(conn)?) + .filter(schema::builds::id.eq(build_id)) + .set(schema::builds::extra_ids.eq(new_ids)) + .get_result::(conn)?) }) } - pub fn init_purge(self: &Self, - build_id: i32) -> impl Future { + pub fn init_purge(&self, build_id: i32) -> impl Future { self.run_in_transaction(move |conn| { use schema::builds::dsl::*; - let current_build = builds - .filter(id.eq(build_id)) - .get_result::(conn)?; - let current_repo_state = RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); - let current_published_state = PublishedState::from_db(current_build.published_state, ¤t_build.published_state_reason); - if current_repo_state.same_state_as(&RepoState::Verifying) || - current_repo_state.same_state_as(&RepoState::Purging) || - current_published_state.same_state_as(&PublishedState::Publishing) { - /* Only allow pruning when we're not working on the build repo */ - return Err(ApiError::BadRequest("Can't prune build while in use".to_string())) - }; + let current_build = builds.filter(id.eq(build_id)).get_result::(conn)?; + let current_repo_state = + RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); + let current_published_state = PublishedState::from_db( + current_build.published_state, + ¤t_build.published_state_reason, + ); + if current_repo_state.same_state_as(&RepoState::Verifying) + || current_repo_state.same_state_as(&RepoState::Purging) + || current_published_state.same_state_as(&PublishedState::Publishing) + { + /* Only allow pruning when we're not working on the build repo */ + return Err(ApiError::BadRequest( + "Can't prune build while in use".to_string(), + )); + }; let (val, reason) = RepoState::to_db(&RepoState::Purging); diesel::update(builds) .filter(id.eq(build_id)) - .set((repo_state.eq(val), - repo_state_reason.eq(reason))) + .set((repo_state.eq(val), repo_state_reason.eq(reason))) .execute(conn)?; Ok(()) }) } - pub fn finish_purge(self: &Self, - build_id: i32, - error: 
Option,) -> impl Future { + pub fn finish_purge( + &self, + build_id: i32, + error: Option, + ) -> impl Future { self.run_in_transaction(move |conn| { use schema::builds::dsl::*; - let current_build = builds - .filter(id.eq(build_id)) - .get_result::(conn)?; - let current_repo_state = RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); + let current_build = builds.filter(id.eq(build_id)).get_result::(conn)?; + let current_repo_state = + RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); if !current_repo_state.same_state_as(&RepoState::Purging) { - return Err(ApiError::BadRequest("Unexpected repo state, was not purging".to_string())) + return Err(ApiError::BadRequest( + "Unexpected repo state, was not purging".to_string(), + )); }; let new_state = match error { None => RepoState::Purged, - Some(err_string) => RepoState::Failed(format!("Failed to Purge build: {}", err_string)), + Some(err_string) => { + RepoState::Failed(format!("Failed to Purge build: {}", err_string)) + } }; let (val, reason) = RepoState::to_db(&new_state); - let new_build = - diesel::update(builds) + let new_build = diesel::update(builds) .filter(id.eq(build_id)) - .set((repo_state.eq(val), - repo_state_reason.eq(reason))) + .set((repo_state.eq(val), repo_state_reason.eq(reason))) .get_result::(conn)?; Ok(new_build) }) @@ -287,18 +376,23 @@ impl Db { /* Build refs */ - pub fn new_build_ref(self: &Self, a_build_ref: NewBuildRef) -> impl Future { + pub fn new_build_ref( + &self, + a_build_ref: NewBuildRef, + ) -> impl Future { self.run(move |conn| { use self::schema::build_refs::dsl::*; Ok(diesel::insert_into(build_refs) - .values(&a_build_ref) - .get_result::(conn)?) + .values(&a_build_ref) + .get_result::(conn)?) }) } - pub fn lookup_build_ref(self: &Self, - the_build_id: i32, - ref_id: i32) -> impl Future { + pub fn lookup_build_ref( + &self, + the_build_id: i32, + ref_id: i32, + ) -> impl Future { self.run(move |conn| { use schema::build_refs::dsl::*; Ok(build_refs @@ -309,13 +403,15 @@ impl Db { } #[allow(dead_code)] - pub fn lookup_build_refs(self: &Self, - the_build_id: i32) -> impl Future, Error = ApiError> { + pub fn lookup_build_refs( + &self, + the_build_id: i32, + ) -> impl Future, Error = ApiError> { self.run(move |conn| { use schema::build_refs::dsl::*; Ok(build_refs .filter(build_id.eq(the_build_id)) - .get_results::(conn)?) + .get_results::(conn)?) 
}) } } diff --git a/src/delayed.rs b/src/delayed.rs index 572e228..4a3dd66 100644 --- a/src/delayed.rs +++ b/src/delayed.rs @@ -1,16 +1,16 @@ -use futures::{task, Future,Async,Poll}; +use futures::{task, Async, Future, Poll}; +use std::cell::{Cell, RefCell}; use std::collections::HashMap; -use std::cell::{Cell,RefCell}; use std::rc::Rc; #[derive(Debug)] -struct InnerDelayedResult { +struct InnerDelayedResult { next_clone_id: Cell, - result: RefCell>>, - waiters: RefCell> + result: RefCell>>, + waiters: RefCell>, } -impl InnerDelayedResult { +impl InnerDelayedResult { fn new() -> Rc { Rc::new(InnerDelayedResult { next_clone_id: Cell::new(0), @@ -28,12 +28,12 @@ impl InnerDelayedResult { } #[derive(Debug)] -pub struct DelayedResult { - inner: Rc>, +pub struct DelayedResult { + inner: Rc>, waiter: usize, } -impl Clone for DelayedResult { +impl Clone for DelayedResult { fn clone(&self) -> Self { let next_id = self.inner.next_clone_id.get() + 1; self.inner.next_clone_id.replace(next_id); @@ -45,16 +45,18 @@ impl Clone for DelayedResult { } } -impl Drop for DelayedResult { +impl Drop for DelayedResult { fn drop(&mut self) { let mut waiters = self.inner.waiters.borrow_mut(); waiters.remove(&self.waiter); } } -impl Future for DelayedResult - where T: std::fmt::Debug + std::clone::Clone, - E: std::fmt::Debug + std::clone::Clone, { +impl Future for DelayedResult +where + T: std::fmt::Debug + std::clone::Clone, + E: std::fmt::Debug + std::clone::Clone, +{ type Item = T; type Error = E; @@ -62,35 +64,40 @@ impl Future for DelayedResult let res_ref = self.inner.result.borrow().clone(); if let Some(res) = res_ref { match res { - Err(e) => Err(e.clone()), - Ok(r) => Ok(Async::Ready(r.clone())), + Err(e) => Err(e), + Ok(r) => Ok(Async::Ready(r)), } } else { - self.inner.waiters.borrow_mut().insert(self.waiter, task::current()); + self.inner + .waiters + .borrow_mut() + .insert(self.waiter, task::current()); Ok(Async::NotReady) } } } -impl DelayedResult - where T: std::fmt::Debug, - E: std::fmt::Debug, { +impl DelayedResult +where + T: std::fmt::Debug, + E: std::fmt::Debug, +{ pub fn new() -> Self { let inner = InnerDelayedResult::new(); DelayedResult { waiter: inner.next_clone_id.get(), - inner: inner, + inner, } } pub fn err(e: E) -> Self { let inner = InnerDelayedResult::err(e); DelayedResult { waiter: inner.next_clone_id.get(), - inner: inner, + inner, } } - pub fn set(&mut self, res: Result) { + pub fn set(&mut self, res: Result) { self.inner.result.replace(Some(res)); let waiters = self.inner.waiters.replace(HashMap::new()); diff --git a/src/deltas.rs b/src/deltas.rs index 9b0a212..b887bd3 100644 --- a/src/deltas.rs +++ b/src/deltas.rs @@ -1,37 +1,37 @@ +use crate::app::Config; +use crate::delayed::DelayedResult; +use crate::errors::DeltaGenerationError; +use crate::ostree; +use actix::dev::ToEnvelope; use actix::prelude::*; use actix::Actor; -use actix::dev::ToEnvelope; use actix_web::web::Data; -use app::Config; -use errors::{DeltaGenerationError}; -use futures::Future; +use actix_web_actors::ws; use futures::future; -use ostree; -use std::collections::{VecDeque,HashMap}; +use futures::Future; +use log::{error, info, warn}; +use rand::prelude::IteratorRandom; +use serde::{Deserialize, Serialize}; +use serde_json::json; use std::cell::Cell; +use std::collections::{HashMap, VecDeque}; use std::rc::Rc; -use std::sync::Arc; -use std::time::{Instant, Duration}; -use actix_web_actors::ws; -use serde_json; -use rand; -use rand::prelude::IteratorRandom; use std::sync::mpsc; - -use delayed::DelayedResult; 
+use std::sync::Arc; +use std::time::{Duration, Instant}; const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(30); const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); -#[derive(Debug,Clone,PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct DeltaRequest { pub repo: String, pub delta: ostree::Delta, } -impl DeltaRequest { - fn to_string(&self) -> String { - format!("{}/{}", self.repo, self.delta.to_string()) +impl std::fmt::Display for DeltaRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "{}/{}", self.repo, self.delta) } } @@ -39,7 +39,7 @@ impl Message for DeltaRequest { type Result = Result<(), DeltaGenerationError>; } -#[derive(Debug,Clone)] +#[derive(Debug, Clone)] pub struct DeltaRequestSync { pub delta_request: DeltaRequest, pub tx: mpsc::Sender<(ostree::Delta, Result<(), DeltaGenerationError>)>, @@ -52,7 +52,7 @@ impl Message for DeltaRequestSync { #[derive(Debug)] struct QueuedRequest { request: DeltaRequest, - delayed_result: DelayedResult<(),DeltaGenerationError>, + delayed_result: DelayedResult<(), DeltaGenerationError>, } impl QueuedRequest { @@ -60,7 +60,7 @@ impl QueuedRequest { let delayed = DelayedResult::new(); QueuedRequest { request: request.clone(), - delayed_result: delayed.clone(), + delayed_result: delayed, } } } @@ -88,14 +88,12 @@ impl WorkerInfo { } } - /* The DeltaGenerator is an actor handling the DeltaRequest message, but * it then fronts a number of workers that it queues the request onto. */ #[derive(Debug)] pub struct DeltaGenerator { - config: Arc, outstanding: VecDeque, local_worker: Rc>, remote_workers: Vec>>, @@ -112,9 +110,9 @@ impl DeltaGenerator { self.next_worker_id += 1; self.remote_workers.push(Rc::new(WorkerInfo { name: name.to_string(), - id: id, + id, available: Cell::new(available), - addr: addr, + addr, })); info!("New delta worker {} registred as #{} ", name, id); id @@ -129,10 +127,21 @@ impl DeltaGenerator { } } - fn start_request(&self, worker: Rc>, mut queued_request: QueuedRequest, ctx: &mut Context) - where A: Handler, - A::Context: ToEnvelope { - info!("Assigned delta {} to worker {} #{}", queued_request.request.to_string(), worker.name, worker.id); + fn start_request( + &self, + worker: Rc>, + mut queued_request: QueuedRequest, + ctx: &mut Context, + ) where + A: Handler, + A::Context: ToEnvelope, + { + info!( + "Assigned delta {} to worker {} #{}", + queued_request.request.to_string(), + worker.name, + worker.id + ); worker.claim(); ctx.spawn( worker.addr @@ -174,7 +183,12 @@ impl DeltaGenerator { } } else { /* Find available worker */ - if let Some(available_worker) = self.remote_workers.iter().filter(|w| w.is_available()).choose(&mut rand::thread_rng()) { + if let Some(available_worker) = self + .remote_workers + .iter() + .filter(|w| w.is_available()) + .choose(&mut rand::thread_rng()) + { self.start_request(available_worker.clone(), request, ctx); } else { /* No worker available, return to queue */ @@ -185,11 +199,16 @@ impl DeltaGenerator { } } - fn handle_request(&mut self, request: DeltaRequest, ctx: &mut Context) -> DelayedResult<(),DeltaGenerationError> { - self.outstanding.iter() + fn handle_request( + &mut self, + request: DeltaRequest, + ctx: &mut Context, + ) -> DelayedResult<(), DeltaGenerationError> { + self.outstanding + .iter() .find(|req| req.request == request) .map(|req| req.delayed_result.clone()) - .unwrap_or_else( || { + .unwrap_or_else(|| { let req = QueuedRequest::new(&request); let r = req.delayed_result.clone(); self.outstanding.push_back(req); 
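// Illustrative sketch, not part of the patch: how the generator assigns a queued request —
// filter the remote workers down to the ones with free capacity, then pick one at random
// with IteratorRandom::choose. Requires the rand crate (already a dependency here); the
// Worker struct is a simplified stand-in for WorkerInfo.
use rand::prelude::IteratorRandom;

#[derive(Debug)]
struct Worker {
    name: &'static str,
    available: u32,
}

fn main() {
    let workers = vec![
        Worker { name: "local", available: 0 },
        Worker { name: "remote-1", available: 2 },
        Worker { name: "remote-2", available: 1 },
    ];
    if let Some(picked) = workers
        .iter()
        .filter(|w| w.available > 0)
        .choose(&mut rand::thread_rng())
    {
        println!("assigning delta to {}", picked.name);
    } else {
        println!("no worker available, leaving request queued");
    }
}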
@@ -215,17 +234,17 @@ impl Handler for DeltaGenerator { fn handle(&mut self, msg: DeltaRequestSync, ctx: &mut Self::Context) -> Self::Result { let request = msg.delta_request.clone(); let delta = request.delta.clone(); - let tx = msg.tx.clone(); + let tx = msg.tx; let r = self.handle_request(request, ctx); - ctx.spawn(Box::new(r - .then(move |r| { - if let Err(_e) = tx.send((delta, r.clone())) { - error!("Failed to reply to sync delta request"); - } - r - }) - .map_err(|_e| ()) - .into_actor(self) + ctx.spawn(Box::new( + r.then(move |r| { + if let Err(_e) = tx.send((delta, r.clone())) { + error!("Failed to reply to sync delta request"); + } + r + }) + .map_err(|_e| ()) + .into_actor(self), )); Box::new(actix::fut::ok(())) } @@ -246,7 +265,6 @@ impl Handler for DeltaGenerator { } } - #[derive(Debug)] pub struct RegisterRemoteWorker { name: String, @@ -262,11 +280,10 @@ impl Handler for DeltaGenerator { type Result = usize; fn handle(&mut self, msg: RegisterRemoteWorker, _ctx: &mut Self::Context) -> usize { - self.add_worker( &msg.name, msg.capacity, msg.addr) + self.add_worker(&msg.name, msg.capacity, msg.addr) } } - #[derive(Debug)] pub struct UnregisterRemoteWorker { id: usize, @@ -284,7 +301,6 @@ impl Handler for DeltaGenerator { } } - #[derive(Debug)] pub struct LocalWorker { pub config: Arc, @@ -301,9 +317,13 @@ impl Handler for LocalWorker { let repoconfig = match self.config.get_repoconfig(&msg.repo) { Err(_e) => { return Box::new( - future::err(DeltaGenerationError::new(&format!("No repo named: {}", &msg.repo))) - .into_actor(self)) - }, + future::err(DeltaGenerationError::new(&format!( + "No repo named: {}", + &msg.repo + ))) + .into_actor(self), + ) + } Ok(r) => r, }; @@ -313,20 +333,16 @@ impl Handler for LocalWorker { Box::new( ostree::generate_delta_async(&repo_path, &delta) .from_err() - .into_actor(self)) + .into_actor(self), + ) } } pub fn start_delta_generator(config: Arc) -> Addr { - let n_threads = config.local_delta_threads; - let config_copy = config.clone(); - let local_worker = LocalWorker { - config: config_copy.clone(), - }.start(); + let local_worker = LocalWorker { config }.start(); let generator = DeltaGenerator { - config: config, outstanding: VecDeque::new(), local_worker: Rc::new(WorkerInfo { name: "local".to_string(), @@ -344,11 +360,9 @@ pub fn start_delta_generator(config: Arc) -> Addr { #[derive(Debug)] pub struct RemoteWorkerItem { id: u32, - request: DeltaRequest, - delayed_result: DelayedResult<(),DeltaGenerationError>, + delayed_result: DelayedResult<(), DeltaGenerationError>, } - #[derive(Debug)] pub struct RemoteWorker { remote: String, @@ -365,8 +379,7 @@ pub struct RemoteWorker { pub enum RemoteClientMessage { Register { capacity: u32 }, Unregister, - Finished { id: u32, - errmsg: Option }, + Finished { id: u32, errmsg: Option }, } #[derive(Serialize, Deserialize, Debug)] @@ -382,7 +395,7 @@ pub enum RemoteServerMessage { impl RemoteWorker { pub fn new(config: &Data, generator: &Addr, remote: String) -> Self { RemoteWorker { - remote: remote, + remote, id: None, unregistered: false, last_item_id: 0, @@ -394,14 +407,13 @@ impl RemoteWorker { } fn allocate_item_id(&mut self) -> u32 { - self.last_item_id +=1; + self.last_item_id += 1; self.last_item_id } - fn new_item(&mut self, msg: &DeltaRequest) -> RemoteWorkerItem { + fn new_item(&mut self) -> RemoteWorkerItem { RemoteWorkerItem { id: self.allocate_item_id(), - request: msg.clone(), delayed_result: DelayedResult::new(), } } @@ -412,7 +424,8 @@ impl RemoteWorker { self.generator 
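// Illustrative sketch, not part of the patch: the delta-worker websocket protocol is plain
// serde-serialized JSON — ctx.text(json!(...)) on one side, serde_json::from_str on the
// other. The enum below only mirrors RemoteClientMessage loosely, and the errmsg field type
// is an assumption (presumably Option<String>) since the listing drops generic parameters.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum ClientMessage {
    Register { capacity: u32 },
    Unregister,
    Finished { id: u32, errmsg: Option<String> },
}

fn main() -> Result<(), serde_json::Error> {
    let wire = serde_json::to_string(&ClientMessage::Finished { id: 7, errmsg: None })?;
    println!("{}", wire); // {"Finished":{"id":7,"errmsg":null}}
    let parsed: ClientMessage = serde_json::from_str(&wire)?;
    assert_eq!(parsed, ClientMessage::Finished { id: 7, errmsg: None });
    Ok(())
}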
.send(RegisterRemoteWorker { name: self.remote.clone(), - addr, capacity, + addr, + capacity, }) .into_actor(self) .then(move |msg_send_res, worker, ctx| { @@ -423,18 +436,19 @@ impl RemoteWorker { * we got the register response, do it now */ if worker.unregistered { ctx.spawn( - worker.generator - .send(UnregisterRemoteWorker { - id, - }) + worker + .generator + .send(UnregisterRemoteWorker { id }) .into_actor(worker) - .then(|_msg_send_res, _worker, _ctx| actix::fut::ok(()) )); + .then(|_msg_send_res, _worker, _ctx| actix::fut::ok(())), + ); } } else { error!("Unable to register Remote Worker {:?}", msg_send_res); } actix::fut::ok(()) - })); + }), + ); } fn msg_unregister(&mut self, ctx: &mut ws::WebsocketContext) { @@ -445,25 +459,30 @@ impl RemoteWorker { if let Some(id) = self.id { ctx.spawn( self.generator - .send(UnregisterRemoteWorker { - id, - }) + .send(UnregisterRemoteWorker { id }) .into_actor(self) .then(move |_msg_send_res, worker, _ctx| { worker.id = None; actix::fut::ok(()) - })); + }), + ); } } - fn msg_finished(&mut self, id: u32, errmsg: Option, _ctx: &mut ws::WebsocketContext) { + fn msg_finished( + &mut self, + id: u32, + errmsg: Option, + _ctx: &mut ws::WebsocketContext, + ) { match self.outstanding.remove(&id) { - Some(mut item) => { - item.delayed_result.set(match errmsg { - None => Ok(()), - Some(msg) => Err(DeltaGenerationError::new(&format!("Remote worked id {} failed to generate delta: {}", id, &msg))), - }) - }, + Some(mut item) => item.delayed_result.set(match errmsg { + None => Ok(()), + Some(msg) => Err(DeltaGenerationError::new(&format!( + "Remote worked id {} failed to generate delta: {}", + id, &msg + ))), + }), None => error!("Got finished message for unexpected handle {}", id), } } @@ -481,7 +500,6 @@ impl RemoteWorker { if Instant::now().duration_since(worker.last_recieved_ping) > CLIENT_TIMEOUT { warn!("Delta worker heartbeat missing, disconnecting!"); ctx.stop(); - return; } }); } @@ -492,24 +510,32 @@ impl Handler for RemoteWorker { fn handle(&mut self, msg: DeltaRequest, ctx: &mut Self::Context) -> Self::Result { let url = { - let repoconfig = - match self.config.get_repoconfig(&msg.repo) { - Ok(c) => c, - Err(e) => return Box::new( - DelayedResult::err(DeltaGenerationError::new(&format!("Can't get repoconfig: {}", e))) - .into_actor(self)), - }; + let repoconfig = match self.config.get_repoconfig(&msg.repo) { + Ok(c) => c, + Err(e) => { + return Box::new( + DelayedResult::err(DeltaGenerationError::new(&format!( + "Can't get repoconfig: {}", + e + ))) + .into_actor(self), + ) + } + }; repoconfig.get_base_url(&self.config) }; - let item = self.new_item(&msg); + let item = self.new_item(); - ctx.text(json!(RemoteServerMessage::RequestDelta { - url: url, - id: item.id, - repo: msg.repo, - delta: msg.delta, - }).to_string()); + ctx.text( + json!(RemoteServerMessage::RequestDelta { + url, + id: item.id, + repo: msg.repo, + delta: msg.delta, + }) + .to_string(), + ); let fut = item.delayed_result.clone(); self.outstanding.insert(item.id, item); @@ -531,10 +557,9 @@ impl Actor for RemoteWorker { /* We send this with Arbiter::spawn, not cxt.spawn() as this context is shutting down */ Arbiter::spawn( self.generator - .send(UnregisterRemoteWorker { - id, - }) - .then(|_msg_send_res| Ok(()))); + .send(UnregisterRemoteWorker { id }) + .then(|_msg_send_res| Ok(())), + ); } Running::Stop @@ -549,20 +574,16 @@ impl StreamHandler for RemoteWorker { self.last_recieved_ping = Instant::now(); ctx.pong(&msg); } - ws::Message::Pong(_) => { - } - ws::Message::Text(text) 
=> { - match serde_json::from_str::(&text) { - Ok(message) => self.message(message, ctx), - Err(e) => error!("Got invalid websocket message: {}", e), - } - } + ws::Message::Pong(_) => {} + ws::Message::Text(text) => match serde_json::from_str::(&text) { + Ok(message) => self.message(message, ctx), + Err(e) => error!("Got invalid websocket message: {}", e), + }, ws::Message::Binary(_bin) => error!("Unexpected binary ws message"), ws::Message::Close(_) => { ctx.stop(); - }, - ws::Message::Nop => { - }, + } + ws::Message::Nop => {} } } } diff --git a/src/errors.rs b/src/errors.rs index c59698b..c0cd50e 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,10 +1,11 @@ -use actix; +use crate::ostree::OstreeError; +use actix_web::error::BlockingError; +use actix_web::http::StatusCode; use actix_web::{error::ResponseError, HttpResponse}; -use diesel::result::{Error as DieselError}; +use diesel::result::Error as DieselError; +use failure::Fail; +use serde_json::json; use std::io; -use actix_web::http::StatusCode; -use ostree::OstreeError; -use actix_web::error::BlockingError; #[derive(Fail, Debug, Clone)] pub enum DeltaGenerationError { @@ -49,31 +50,19 @@ pub type JobResult = Result; impl From for JobError { fn from(e: DieselError) -> Self { - match e { - _ => { - JobError::DBError(e.to_string()) - } - } + JobError::DBError(e.to_string()) } } impl From for JobError { fn from(e: OstreeError) -> Self { - match e { - _ => { - JobError::InternalError(e.to_string()) - } - } + JobError::InternalError(e.to_string()) } } impl From for ApiError { fn from(e: OstreeError) -> Self { - match e { - _ => { - ApiError::InternalServerError(e.to_string()) - } - } + ApiError::InternalServerError(e.to_string()) } } @@ -81,36 +70,28 @@ impl From> for ApiError { fn from(e: BlockingError) -> Self { match e { BlockingError::Error(e) => e, - BlockingError::Canceled => ApiError::InternalServerError("Blocking operation cancelled".to_string()) + BlockingError::Canceled => { + ApiError::InternalServerError("Blocking operation cancelled".to_string()) + } } } } impl From for ApiError { fn from(e: r2d2::Error) -> Self { - match e { - _ => ApiError::InternalServerError(format!("Database error: {}", e.to_string())) - } + ApiError::InternalServerError(format!("Database error: {}", e)) } } impl From for JobError { fn from(e: DeltaGenerationError) -> Self { - match e { - _ => { - JobError::InternalError(format!("Failed to generate delta: {}", e.to_string())) - } - } + JobError::InternalError(format!("Failed to generate delta: {}", e)) } } impl From for JobError { fn from(e: io::Error) -> Self { - match e { - _ => { - JobError::InternalError(e.to_string()) - } - } + JobError::InternalError(e.to_string()) } } @@ -126,10 +107,10 @@ pub enum ApiError { BadRequest(String), #[fail(display = "WrongRepoState({}): {}", _2, _0)] - WrongRepoState(String,String,String), + WrongRepoState(String, String, String), - #[fail(display = "WrongPublishedState({}): {}", _2, _0 )] - WrongPublishedState(String,String,String), + #[fail(display = "WrongPublishedState({}): {}", _2, _0)] + WrongPublishedState(String, String, String), #[fail(display = "InvalidToken: {}", _0)] InvalidToken(String), @@ -142,30 +123,20 @@ impl From for ApiError { fn from(e: DieselError) -> Self { match e { DieselError::NotFound => ApiError::NotFound, - _ => { - ApiError::InternalServerError(e.to_string()) - } + _ => ApiError::InternalServerError(e.to_string()), } } } impl From for ApiError { fn from(io_error: io::Error) -> Self { - match io_error { - _ => { - 
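// Illustrative sketch, not part of the patch: the errors.rs cleanups above drop the needless
// `match e { _ => ... }` wrappers — a plain From impl is all that `?` needs to convert the
// underlying error. The names below only loosely mirror the real JobError/ApiError types.
use std::io;

#[derive(Debug)]
enum SketchError {
    Internal(String),
}

impl From<io::Error> for SketchError {
    fn from(e: io::Error) -> Self {
        SketchError::Internal(e.to_string())
    }
}

fn read_state() -> Result<String, SketchError> {
    // The io::Error from read_to_string is converted through From by `?`.
    Ok(std::fs::read_to_string("/no/such/file")?)
}

fn main() {
    println!("{:?}", read_state());
}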
ApiError::InternalServerError(io_error.to_string()) - } - } + ApiError::InternalServerError(io_error.to_string()) } } impl From for ApiError { fn from(e: actix::MailboxError) -> Self { - match e { - _ => { - ApiError::InternalServerError(e.to_string()) - } - } + ApiError::InternalServerError(e.to_string()) } } @@ -216,11 +187,13 @@ impl ApiError { pub fn status_code(&self) -> StatusCode { match *self { - ApiError::InternalServerError(ref _internal_message) => StatusCode::INTERNAL_SERVER_ERROR, + ApiError::InternalServerError(ref _internal_message) => { + StatusCode::INTERNAL_SERVER_ERROR + } ApiError::NotFound => StatusCode::NOT_FOUND, ApiError::BadRequest(ref _message) => StatusCode::BAD_REQUEST, - ApiError::WrongRepoState(_,_,_) => StatusCode::BAD_REQUEST, - ApiError::WrongPublishedState(_,_,_) => StatusCode::BAD_REQUEST, + ApiError::WrongRepoState(_, _, _) => StatusCode::BAD_REQUEST, + ApiError::WrongPublishedState(_, _, _) => StatusCode::BAD_REQUEST, ApiError::InvalidToken(_) => StatusCode::UNAUTHORIZED, ApiError::NotEnoughPermissions(ref _message) => StatusCode::FORBIDDEN, } @@ -230,10 +203,13 @@ impl ApiError { impl ResponseError for ApiError { fn error_response(&self) -> HttpResponse { if let ApiError::InternalServerError(internal_message) = self { - error!("Responding with internal error: {}", internal_message); + log::error!("Responding with internal error: {}", internal_message); } if let ApiError::NotEnoughPermissions(internal_message) = self { - error!("Responding with NotEnoughPermissions error: {}", internal_message); + log::error!( + "Responding with NotEnoughPermissions error: {}", + internal_message + ); } HttpResponse::build(self.status_code()).json(self.to_json()) } diff --git a/src/jobs.rs b/src/jobs.rs index 7d23685..fa3668f 100644 --- a/src/jobs.rs +++ b/src/jobs.rs @@ -2,36 +2,37 @@ use actix::prelude::*; use actix::{Actor, SyncContext}; use diesel::pg::PgConnection; use diesel::prelude::*; -use diesel::result::{Error as DieselError}; use diesel::result::DatabaseErrorKind::SerializationFailure; -use diesel; -use filetime; -use serde_json; +use diesel::result::Error as DieselError; +use log::{error, info}; +use serde_json::json; use std::cell::RefCell; -use std::str; +use std::collections::{HashMap, HashSet}; use std::ffi::OsString; use std::fs::{self, File}; use std::io::Write; -use std::process::{Command, Stdio}; -use std::sync::{Arc}; +use std::iter::FromIterator; +use std::os::unix::process::CommandExt; use std::path::PathBuf; +use std::process::{Command, Stdio}; +use std::str; +use std::sync::mpsc; +use std::sync::Arc; use std::time; -use std::os::unix::process::CommandExt; -use libc; -use std::collections::{HashMap,HashSet}; -use std::iter::FromIterator; use walkdir::WalkDir; -use std::sync::mpsc; -use ostree; -use app::{RepoConfig, Config}; -use Pool; -use errors::{JobError, JobResult}; -use models::{NewJob, Job, JobDependency, JobKind, CommitJob, PublishJob, UpdateRepoJob, JobStatus, job_dependencies_with_status, RepoState, PublishedState }; -use deltas::{DeltaGenerator, DeltaRequest, DeltaRequestSync}; -use models; -use schema::*; -use schema; +use crate::app::{Config, RepoConfig}; +use crate::deltas::{DeltaGenerator, DeltaRequest, DeltaRequestSync}; +use crate::errors::{JobError, JobResult}; +use crate::models; +use crate::models::{ + job_dependencies_with_status, CommitJob, Job, JobDependency, JobKind, JobStatus, NewJob, + PublishJob, PublishedState, RepoState, UpdateRepoJob, +}; +use crate::ostree; +use crate::schema; +use crate::schema::*; +use 
crate::Pool; /************************************************************************** * Job handling - theory of operations. @@ -62,10 +63,12 @@ use schema; * ************************************************************************/ -fn generate_flatpakref(ref_name: &String, - maybe_build_id: Option, - config: &Config, - repoconfig: &RepoConfig) -> (String, String) { +fn generate_flatpakref( + ref_name: &str, + maybe_build_id: Option, + config: &Config, + repoconfig: &RepoConfig, +) -> (String, String) { let parts: Vec<&str> = ref_name.split('/').collect(); let filename = format!("{}.flatpakref", parts[1]); @@ -81,31 +84,31 @@ fn generate_flatpakref(ref_name: &String, let (url, maybe_gpg_content) = match maybe_build_id { Some(build_id) => ( format!("{}/build-repo/{}", config.base_url, build_id), - &config.build_gpg_key_content - ), - None => ( - repoconfig.get_base_url (&config), - &repoconfig.gpg_key_content + &config.build_gpg_key_content, ), + None => (repoconfig.get_base_url(config), &repoconfig.gpg_key_content), }; let title = if let Some(build_id) = maybe_build_id { format!("{} build nr {}", parts[1], build_id) } else { let reponame = match &repoconfig.suggested_repo_name { - Some(suggested_name) => &suggested_name, + Some(suggested_name) => suggested_name, None => &repoconfig.name, }; format!("{} from {}", app_id, reponame) }; - let mut contents = format!(r#"[Flatpak Ref] + let mut contents = format!( + r#"[Flatpak Ref] Name={} Branch={} Title={} IsRuntime={} Url={} -"#, app_id, branch, title, is_runtime, url); +"#, + app_id, branch, title, is_runtime, url + ); /* We only want to deploy the collection ID if the flatpakref is being generated for the main * repo not a build repo. @@ -133,23 +136,26 @@ Url={} (filename, contents) } -fn add_gpg_args(cmd: &mut Command, maybe_gpg_key: &Option, maybe_gpg_homedir: &Option) { +fn add_gpg_args( + cmd: &mut Command, + maybe_gpg_key: &Option, + maybe_gpg_homedir: &Option, +) { if let Some(gpg_homedir) = maybe_gpg_homedir { - cmd - .arg(format!("--gpg-homedir={}", gpg_homedir)); + cmd.arg(format!("--gpg-homedir={}", gpg_homedir)); }; if let Some(key) = maybe_gpg_key { - cmd - .arg(format!("--gpg-sign={}", key)); + cmd.arg(format!("--gpg-sign={}", key)); }; } -fn queue_update_job (delay_secs: u64, - conn: &PgConnection, - repo: &str, - starting_job_id: Option) -> Result<(bool,Job), DieselError> -{ +fn queue_update_job( + delay_secs: u64, + conn: &PgConnection, + repo: &str, + starting_job_id: Option, +) -> Result<(bool, Job), DieselError> { /* We wrap everything in a serializable transaction, because if something else * starts the job while we're adding dependencies to it the dependencies will be * ignored. 
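// Illustrative sketch, not part of the patch: the retry behaviour queue_update_job (and
// pick_next_job below) implement by calling themselves again when the serializable
// transaction aborts. The error type here is a stand-in for diesel's
// DatabaseError(SerializationFailure, _).
#[allow(dead_code)]
#[derive(Debug)]
enum TxError {
    SerializationFailure,
    Other(String),
}

fn with_serializable_retry<T>(mut tx: impl FnMut() -> Result<T, TxError>) -> Result<T, TxError> {
    loop {
        match tx() {
            // A concurrent writer invalidated this transaction: run the whole thing again.
            Err(TxError::SerializationFailure) => continue,
            other => return other,
        }
    }
}

fn main() {
    let mut attempts = 0;
    let result = with_serializable_retry(|| {
        attempts += 1;
        if attempts < 3 {
            Err(TxError::SerializationFailure)
        } else {
            Ok(attempts)
        }
    });
    println!("{:?}", result); // Ok(3)
}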
@@ -223,7 +229,7 @@ fn queue_update_job (delay_secs: u64, diesel::insert_into(schema::job_dependencies::table) .values(JobDependency { job_id: update_job.id, - depends_on: depends_on, + depends_on, }) .execute(conn)?; } @@ -233,8 +239,10 @@ fn queue_update_job (delay_secs: u64, /* Retry on serialization failure */ match transaction_result { - Err(DieselError::DatabaseError(SerializationFailure, _)) => queue_update_job (delay_secs, conn, repo, starting_job_id), - _ => transaction_result + Err(DieselError::DatabaseError(SerializationFailure, _)) => { + queue_update_job(delay_secs, conn, repo, starting_job_id) + } + _ => transaction_result, } } @@ -253,9 +261,10 @@ fn job_log(job_id: i32, conn: &PgConnection, output: &str) { if let Err(e) = diesel::update(jobs::table) .filter(jobs::id.eq(job_id)) .set((jobs::log.eq(jobs::log.concat(&output)),)) - .execute(conn) { - error!("Error appending to job {} log: {}", job_id, e.to_string()); - } + .execute(conn) + { + error!("Error appending to job {} log: {}", job_id, e.to_string()); + } } fn job_log_and_info(job_id: i32, conn: &PgConnection, output: &str) { @@ -268,26 +277,27 @@ fn job_log_and_error(job_id: i32, conn: &PgConnection, output: &str) { job_log(job_id, conn, &format!("{}\n", output)); } -fn do_command(mut cmd: Command) -> JobResult<()> -{ - let output = - unsafe { - cmd - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .pre_exec (|| { - // Setsid in the child to avoid SIGINT on server killing - // child and breaking the graceful shutdown - libc::setsid(); - Ok(()) - }) - .output() - .map_err(|e| JobError::new(&format!("Failed to run {:?}: {}", &cmd, e)))? - }; +fn do_command(mut cmd: Command) -> JobResult<()> { + let output = unsafe { + cmd.stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .pre_exec(|| { + // Setsid in the child to avoid SIGINT on server killing + // child and breaking the graceful shutdown + libc::setsid(); + Ok(()) + }) + .output() + .map_err(|e| JobError::new(&format!("Failed to run {:?}: {}", &cmd, e)))? 
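// Illustrative sketch, not part of the patch: what the pre_exec/setsid dance in do_command
// is for — the child gets its own session, so a SIGINT aimed at the server's process group
// during graceful shutdown does not also kill the running flatpak/ostree command mid-job.
// Requires the libc crate (already a dependency) and a Unix target.
use std::os::unix::process::CommandExt;
use std::process::{Command, Stdio};

fn run_in_own_session(mut cmd: Command) -> std::io::Result<std::process::Output> {
    unsafe {
        cmd.stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .pre_exec(|| {
                // Runs in the child between fork and exec.
                libc::setsid();
                Ok(())
            })
            .output()
    }
}

fn main() -> std::io::Result<()> {
    let out = run_in_own_session(Command::new("true"))?;
    println!("child exited with {}", out.status);
    Ok(())
}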
+ }; if !output.status.success() { - return Err(JobError::new(&format!("Command {:?} exited unsuccesfully: {}", &cmd, String::from_utf8_lossy(&output.stderr)))) + return Err(JobError::new(&format!( + "Command {:?} exited unsuccesfully: {}", + &cmd, + String::from_utf8_lossy(&output.stderr) + ))); } Ok(()) } @@ -296,17 +306,23 @@ fn new_job_instance(executor: &JobExecutor, job: Job) -> Box { match JobKind::from_db(job.kind) { Some(JobKind::Commit) => CommitJobInstance::new(job), Some(JobKind::Publish) => PublishJobInstance::new(job), - Some(JobKind::UpdateRepo) => UpdateRepoJobInstance::new(job, executor.delta_generator.clone()), + Some(JobKind::UpdateRepo) => { + UpdateRepoJobInstance::new(job, executor.delta_generator.clone()) + } _ => InvalidJobInstance::new(job, JobError::new("Unknown job type")), } } pub trait JobInstance { - fn get_job_id (&self) -> i32; - fn order (&self) -> i32 { + fn get_job_id(&self) -> i32; + fn order(&self) -> i32 { 0 } - fn handle_job (&mut self, executor: &JobExecutor, conn: &PgConnection) -> JobResult; + fn handle_job( + &mut self, + executor: &JobExecutor, + conn: &PgConnection, + ) -> JobResult; } struct InvalidJobInstance { @@ -315,26 +331,29 @@ struct InvalidJobInstance { } impl InvalidJobInstance { - fn new(job: Job, - error: JobError) -> Box { + #[allow(clippy::new_ret_no_self)] + fn new(job: Job, error: JobError) -> Box { Box::new(InvalidJobInstance { job_id: job.id, - error: error, + error, }) } } impl JobInstance for InvalidJobInstance { - fn get_job_id (&self) -> i32 { + fn get_job_id(&self) -> i32 { self.job_id } - fn handle_job (&mut self, _executor: &JobExecutor, _conn: &PgConnection) -> JobResult { + fn handle_job( + &mut self, + _executor: &JobExecutor, + _conn: &PgConnection, + ) -> JobResult { Err(self.error.clone()) } } - #[derive(Debug)] struct CommitJobInstance { pub job_id: i32, @@ -345,6 +364,7 @@ struct CommitJobInstance { } impl CommitJobInstance { + #[allow(clippy::new_ret_no_self)] fn new(job: Job) -> Box { if let Ok(commit_job) = serde_json::from_str::(&job.contents) { Box::new(CommitJobInstance { @@ -359,11 +379,13 @@ impl CommitJobInstance { } } - fn do_commit_build_refs (&self, - build_refs: &Vec, - config: &Config, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult { + fn do_commit_build_refs( + &self, + build_refs: &[models::BuildRef], + config: &Config, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult { let build_repo_path = config.build_repo_base.join(self.build_id.to_string()); let upload_path = build_repo_path.join("upload"); @@ -373,11 +395,16 @@ impl CommitJobInstance { let mut commits = HashMap::new(); let endoflife_rebase_arg = if let Some(endoflife_rebase) = &self.endoflife_rebase { - if let Some(app_ref) = build_refs.iter().filter(|app_ref| app_ref.ref_name.starts_with("app/")).nth(0) { - Some(format!("--end-of-life-rebase={}={}", app_ref.ref_name.split('/').nth(1).unwrap(), endoflife_rebase)) - } else { - None - } + build_refs + .iter() + .find(|app_ref| app_ref.ref_name.starts_with("app/")) + .map(|app_ref| { + format!( + "--end-of-life-rebase={}={}", + app_ref.ref_name.split('/').nth(1).unwrap(), + endoflife_rebase + ) + }) } else { None }; @@ -387,38 +414,40 @@ impl CommitJobInstance { src_ref_arg.push_str(&build_ref.commit); let mut cmd = Command::new("flatpak"); - cmd - .arg("build-commit-from") - .arg("--timestamp=NOW") // All builds have the same timestamp, not when the individual builds finished + cmd.arg("build-commit-from") + .arg("--timestamp=NOW") // All builds have the same 
timestamp, not when the individual builds finished .arg("--no-update-summary") // We update it once at the end - .arg("--untrusted") // Verify that the uploaded objects are correct - .arg("--force") // Always generate a new commit even if nothing changed - .arg("--disable-fsync"); // There is a sync in flatpak build-update-repo, so avoid it here + .arg("--untrusted") // Verify that the uploaded objects are correct + .arg("--force") // Always generate a new commit even if nothing changed + .arg("--disable-fsync"); // There is a sync in flatpak build-update-repo, so avoid it here add_gpg_args(&mut cmd, &config.build_gpg_key, &config.gpg_homedir); if let Some(endoflife) = &self.endoflife { - cmd - .arg(format!("--end-of-life={}", endoflife)); + cmd.arg(format!("--end-of-life={}", endoflife)); }; if let Some(endoflife_rebase_arg) = &endoflife_rebase_arg { - cmd - .arg(&endoflife_rebase_arg); + cmd.arg(&endoflife_rebase_arg); }; if let Some(token_type) = &self.token_type { - cmd - .arg(format!("--token-type={}", token_type)); + cmd.arg(format!("--token-type={}", token_type)); }; - cmd - .arg(&src_repo_arg) + cmd.arg(&src_repo_arg) .arg(&src_ref_arg) .arg(&build_repo_path) .arg(&build_ref.ref_name); - job_log_and_info(self.job_id, conn, &format!("Committing ref {} ({})", build_ref.ref_name, build_ref.commit)); + job_log_and_info( + self.job_id, + conn, + &format!( + "Committing ref {} ({})", + build_ref.ref_name, build_ref.commit + ), + ); do_command(cmd)?; let commit = ostree::parse_ref(&build_repo_path, &build_ref.ref_name)?; @@ -427,18 +456,25 @@ impl CommitJobInstance { let unwanted_exts = [".Debug", ".Locale", ".Sources", ".Docs"]; let ref_id_parts: Vec<&str> = build_ref.ref_name.split('/').collect(); - if build_ref.ref_name.starts_with("app/") || (build_ref.ref_name.starts_with("runtime/") && !unwanted_exts.iter().any(|&ext| ref_id_parts[1].ends_with(ext))) { - let (filename, contents) = generate_flatpakref(&build_ref.ref_name, Some(self.build_id), config, repoconfig); + if build_ref.ref_name.starts_with("app/") + || (build_ref.ref_name.starts_with("runtime/") + && !unwanted_exts + .iter() + .any(|&ext| ref_id_parts[1].ends_with(ext))) + { + let (filename, contents) = generate_flatpakref( + &build_ref.ref_name, + Some(self.build_id), + config, + repoconfig, + ); let path = build_repo_path.join(&filename); File::create(&path)?.write_all(contents.as_bytes())?; } } - let mut cmd = Command::new("flatpak"); - cmd - .arg("build-update-repo") - .arg(&build_repo_path); + cmd.arg("build-update-repo").arg(&build_repo_path); add_gpg_args(&mut cmd, &config.build_gpg_key, &config.gpg_homedir); @@ -448,16 +484,20 @@ impl CommitJobInstance { job_log_and_info(self.job_id, conn, "Removing upload directory"); fs::remove_dir_all(&upload_path)?; - Ok(json!({ "refs": commits})) + Ok(json!({ "refs": commits })) } } impl JobInstance for CommitJobInstance { - fn get_job_id (&self) -> i32 { + fn get_job_id(&self) -> i32 { self.job_id } - fn handle_job (&mut self, executor: &JobExecutor, conn: &PgConnection) -> JobResult { + fn handle_job( + &mut self, + executor: &JobExecutor, + conn: &PgConnection, + ) -> JobResult { info!("#{}: Handling Job Commit: build: {}, end-of-life: {}, eol-rebase: {}, token-type: {:?}", &self.job_id, &self.build_id, self.endoflife.as_ref().unwrap_or(&"".to_string()), self.endoflife_rebase.as_ref().unwrap_or(&"".to_string()), self.token_type); @@ -467,19 +507,20 @@ impl JobInstance for CommitJobInstance { let build_data = builds::table .filter(builds::id.eq(self.build_id)) 
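// Illustrative sketch, not part of the patch: the filter do_commit_build_refs applies before
// writing a .flatpakref — apps always get one, runtimes only when they are not
// Debug/Locale/Sources/Docs extensions. Assumes well-formed refs of the form
// kind/id/arch/branch (parts[1] would panic otherwise).
fn wants_flatpakref(ref_name: &str) -> bool {
    let unwanted_exts = [".Debug", ".Locale", ".Sources", ".Docs"];
    let parts: Vec<&str> = ref_name.split('/').collect();
    ref_name.starts_with("app/")
        || (ref_name.starts_with("runtime/")
            && !unwanted_exts.iter().any(|&ext| parts[1].ends_with(ext)))
}

fn main() {
    assert!(wants_flatpakref("app/org.example.App/x86_64/stable"));
    assert!(wants_flatpakref("runtime/org.example.Platform/x86_64/stable"));
    assert!(!wants_flatpakref("runtime/org.example.App.Debug/x86_64/stable"));
    println!("filter behaves as expected");
}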
.get_result::(conn) - .or_else(|_e| Err(JobError::new("Can't load build")))?; + .map_err(|_e| JobError::new("Can't load build"))?; // Get repo config - let repoconfig = config.get_repoconfig(&build_data.repo) - .or_else(|_e| Err(JobError::new(&format!("Can't find repo {}", &build_data.repo))))?; + let repoconfig = config + .get_repoconfig(&build_data.repo) + .map_err(|_e| JobError::new(&format!("Can't find repo {}", &build_data.repo)))?; // Get the uploaded refs from db let build_refs = build_refs::table .filter(build_refs::build_id.eq(self.build_id)) .get_results::(conn) - .or_else(|_e| Err(JobError::new("Can't load build refs")))?; + .map_err(|_e| JobError::new("Can't load build refs"))?; - if build_refs.len() == 0 { + if build_refs.is_empty() { return Err(JobError::new("No refs in build")); } @@ -498,16 +539,19 @@ impl JobInstance for CommitJobInstance { let current_build = builds::table .filter(builds::id.eq(self.build_id)) .get_result::(conn)?; - let current_repo_state = RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); + let current_repo_state = + RepoState::from_db(current_build.repo_state, ¤t_build.repo_state_reason); if !current_repo_state.same_state_as(&RepoState::Verifying) { // Something weird was happening, we expected this build to be in the verifying state - return Err(DieselError::RollbackTransaction) + return Err(DieselError::RollbackTransaction); }; let (val, reason) = RepoState::to_db(&new_repo_state); diesel::update(builds::table) .filter(builds::id.eq(self.build_id)) - .set((builds::repo_state.eq(val), - builds::repo_state_reason.eq(reason))) + .set(( + builds::repo_state.eq(val), + builds::repo_state_reason.eq(reason), + )) .get_result::(conn) })?; @@ -515,7 +559,6 @@ impl JobInstance for CommitJobInstance { } } - #[derive(Debug)] struct PublishJobInstance { pub job_id: i32, @@ -523,6 +566,7 @@ struct PublishJobInstance { } impl PublishJobInstance { + #[allow(clippy::new_ret_no_self)] fn new(job: Job) -> Box { if let Ok(publish_job) = serde_json::from_str::(&job.contents) { Box::new(PublishJobInstance { @@ -534,12 +578,14 @@ impl PublishJobInstance { } } - fn do_publish (&self, - build: &models::Build, - build_refs: &Vec, - config: &Config, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult { + fn do_publish( + &self, + build: &models::Build, + build_refs: &[models::BuildRef], + config: &Config, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult { let build_repo_path = config.build_repo_base.join(self.build_id.to_string()); let mut src_repo_arg = OsString::from("--src-repo="); @@ -548,25 +594,28 @@ impl PublishJobInstance { // Import commit and modify refs let mut cmd = Command::new("flatpak"); - cmd - .arg("build-commit-from") - .arg("--force") // Always generate a new commit even if nothing changed + cmd.arg("build-commit-from") + .arg("--force") // Always generate a new commit even if nothing changed .arg("--no-update-summary"); // We update it separately add_gpg_args(&mut cmd, &repoconfig.gpg_key, &config.gpg_homedir); if let Some(collection_id) = &repoconfig.collection_id { for ref extra_id in build.extra_ids.iter() { - cmd.arg(format!("--extra-collection-id={}.{}", collection_id, extra_id)); + cmd.arg(format!( + "--extra-collection-id={}.{}", + collection_id, extra_id + )); } } - cmd - .arg(&src_repo_arg) - .arg(&repoconfig.path); + cmd.arg(&src_repo_arg).arg(&repoconfig.path); - job_log_and_info(self.job_id, conn, - &format!("Importing build to repo {}", repoconfig.name)); + job_log_and_info( + self.job_id, + 
conn, + &format!("Importing build to repo {}", repoconfig.name), + ); do_command(cmd)?; let appstream_dir = repoconfig.path.join("appstream"); @@ -577,15 +626,17 @@ impl PublishJobInstance { let mut commits = HashMap::new(); for build_ref in build_refs.iter() { - if build_ref.ref_name.starts_with("app/") || build_ref.ref_name.starts_with("runtime/") { + if build_ref.ref_name.starts_with("app/") || build_ref.ref_name.starts_with("runtime/") + { let commit = ostree::parse_ref(&repoconfig.path, &build_ref.ref_name)?; commits.insert(build_ref.ref_name.to_string(), commit); } if build_ref.ref_name.starts_with("app/") { - let (filename, contents) = generate_flatpakref(&build_ref.ref_name, None, config, repoconfig); + let (filename, contents) = + generate_flatpakref(&build_ref.ref_name, None, config, repoconfig); let path = appstream_dir.join(&filename); - job_log_and_info (self.job_id, conn, &format!("generating {}", &filename)); + job_log_and_info(self.job_id, conn, &format!("generating {}", &filename)); let old_contents = fs::read_to_string(&path).unwrap_or_default(); if contents != old_contents { File::create(&path)?.write_all(contents.as_bytes())?; @@ -595,10 +646,13 @@ impl PublishJobInstance { for build_ref in build_refs.iter() { if build_ref.ref_name.starts_with("screenshots/") { - job_log_and_info (self.job_id, conn, &format!("extracting {}", build_ref.ref_name)); + job_log_and_info( + self.job_id, + conn, + &format!("extracting {}", build_ref.ref_name), + ); let mut cmd = Command::new("ostree"); - cmd - .arg(&format!("--repo={}", &build_repo_path.to_str().unwrap())) + cmd.arg(&format!("--repo={}", &build_repo_path.to_str().unwrap())) .arg("checkout") .arg("--user-mode") .arg("--bareuseronly-dirs") @@ -611,17 +665,27 @@ impl PublishJobInstance { /* Create update repo job */ let delay = config.delay_update_secs; - let (is_new, update_job) = queue_update_job (delay, conn, &repoconfig.name, Some(self.job_id))?; + let (is_new, update_job) = + queue_update_job(delay, conn, &repoconfig.name, Some(self.job_id))?; if is_new { - job_log_and_info(self.job_id, conn, - &format!("Queued repository update job {}{}", - update_job.id, match delay { - 0 => "".to_string(), - _ => format!(" in {} secs", delay), - })); + job_log_and_info( + self.job_id, + conn, + &format!( + "Queued repository update job {}{}", + update_job.id, + match delay { + 0 => "".to_string(), + _ => format!(" in {} secs", delay), + } + ), + ); } else { - job_log_and_info(self.job_id, conn, - &format!("Piggy-backed on existing update job {}", update_job.id)); + job_log_and_info( + self.job_id, + conn, + &format!("Piggy-backed on existing update job {}", update_job.id), + ); } Ok(json!({ @@ -632,18 +696,24 @@ impl PublishJobInstance { } impl JobInstance for PublishJobInstance { - fn get_job_id (&self) -> i32 { + fn get_job_id(&self) -> i32 { self.job_id } - fn order (&self) -> i32 { + fn order(&self) -> i32 { 1 /* Delay publish after commits (and other normal ops). because the - commits may generate more publishes. */ + commits may generate more publishes. 
*/ } - fn handle_job (&mut self, executor: &JobExecutor, conn: &PgConnection) -> JobResult { - info!("#{}: Handling Job Publish: build: {}", - &self.job_id, &self.build_id); + fn handle_job( + &mut self, + executor: &JobExecutor, + conn: &PgConnection, + ) -> JobResult { + info!( + "#{}: Handling Job Publish: build: {}", + &self.job_id, &self.build_id + ); let config = &executor.config; @@ -651,18 +721,19 @@ impl JobInstance for PublishJobInstance { let build_data = builds::table .filter(builds::id.eq(self.build_id)) .get_result::(conn) - .or_else(|_e| Err(JobError::new("Can't load build")))?; + .map_err(|_e| JobError::new("Can't load build"))?; // Get repo config - let repoconfig = config.get_repoconfig(&build_data.repo) - .or_else(|_e| Err(JobError::new(&format!("Can't find repo {}", &build_data.repo))))?; + let repoconfig = config + .get_repoconfig(&build_data.repo) + .map_err(|_e| JobError::new(&format!("Can't find repo {}", &build_data.repo)))?; // Get the uploaded refs from db let build_refs = build_refs::table - .filter(build_refs::build_id.eq(self.build_id)) + .filter(build_refs::build_id.eq(self.build_id)) .get_results::(conn) - .or_else(|_e| Err(JobError::new("Can't load build refs")))?; - if build_refs.len() == 0 { + .map_err(|_e| JobError::new("Can't load build refs"))?; + if build_refs.is_empty() { return Err(JobError::new("No refs in build")); } @@ -680,17 +751,22 @@ impl JobInstance for PublishJobInstance { let current_build = builds::table .filter(builds::id.eq(self.build_id)) .get_result::(conn)?; - let current_published_state = PublishedState::from_db(current_build.published_state, ¤t_build.published_state_reason); + let current_published_state = PublishedState::from_db( + current_build.published_state, + ¤t_build.published_state_reason, + ); if !current_published_state.same_state_as(&PublishedState::Publishing) { // Something weird was happening, we expected this build to be in the publishing state error!("Unexpected publishing state {:?}", current_published_state); - return Err(DieselError::RollbackTransaction) + return Err(DieselError::RollbackTransaction); }; let (val, reason) = PublishedState::to_db(&new_published_state); diesel::update(builds::table) .filter(builds::id.eq(self.build_id)) - .set((builds::published_state.eq(val), - builds::published_state_reason.eq(reason))) + .set(( + builds::published_state.eq(val), + builds::published_state_reason.eq(reason), + )) .get_result::(conn) })?; @@ -706,10 +782,11 @@ struct UpdateRepoJobInstance { } impl UpdateRepoJobInstance { + #[allow(clippy::new_ret_no_self)] fn new(job: Job, delta_generator: Addr) -> Box { if let Ok(update_repo_job) = serde_json::from_str::(&job.contents) { Box::new(UpdateRepoJobInstance { - delta_generator: delta_generator, + delta_generator, job_id: job.id, repo: update_repo_job.repo, }) @@ -718,11 +795,14 @@ impl UpdateRepoJobInstance { } } - fn calculate_deltas(&self, repoconfig: &RepoConfig) -> (HashSet, HashSet) { + fn calculate_deltas( + &self, + repoconfig: &RepoConfig, + ) -> (HashSet, HashSet) { let repo_path = repoconfig.get_abs_repo_path(); let mut wanted_deltas = HashSet::new(); - let refs = ostree::list_refs (&repo_path, ""); + let refs = ostree::list_refs(&repo_path, ""); for ref_name in refs { let depth = repoconfig.get_delta_depth_for_ref(&ref_name); @@ -734,7 +814,7 @@ impl UpdateRepoJobInstance { } } } - let old_deltas = HashSet::from_iter(ostree::list_deltas (&repo_path).iter().cloned()); + let old_deltas = HashSet::from_iter(ostree::list_deltas(&repo_path).iter().cloned()); let 
missing_deltas = wanted_deltas.difference(&old_deltas).cloned().collect(); let unwanted_deltas = old_deltas.difference(&wanted_deltas).cloned().collect(); @@ -742,10 +822,12 @@ impl UpdateRepoJobInstance { (missing_deltas, unwanted_deltas) } - fn generate_deltas(&self, - deltas: &HashSet, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn generate_deltas( + &self, + deltas: &HashSet, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult<()> { job_log_and_info(self.job_id, conn, "Generating deltas"); let (tx, rx) = mpsc::channel(); @@ -753,7 +835,7 @@ impl UpdateRepoJobInstance { /* We can't use a regular .send() here, as that requres a current task which is * not available in a sync actor like this. Instead we use the non-blocking * do_send and implement returns using a mpsc::channel. - */ + */ for delta in deltas.iter() { self.delta_generator.do_send(DeltaRequestSync { @@ -767,8 +849,8 @@ impl UpdateRepoJobInstance { for (delta, result) in rx.iter().take(deltas.len()) { let message = match result { - Ok(()) => format!(" {}", delta.to_string()), - Err(e) => format!(" failed to generate {}: {}", delta.to_string(), e), + Ok(()) => format!(" {}", delta), + Err(e) => format!(" failed to generate {}: {}", delta, e), }; job_log_and_info(self.job_id, conn, &message); } @@ -778,10 +860,12 @@ impl UpdateRepoJobInstance { Ok(()) } - fn retire_deltas(&self, - deltas: &HashSet, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn retire_deltas( + &self, + deltas: &HashSet, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult<()> { job_log_and_info(self.job_id, conn, "Cleaning out old deltas"); let repo_path = repoconfig.get_abs_repo_path(); let deltas_dir = repo_path.join("deltas"); @@ -802,8 +886,14 @@ impl UpdateRepoJobInstance { let dst_parent = dst.parent().unwrap(); fs::create_dir_all(&dst_parent)?; - job_log_and_info(self.job_id, conn, - &format!(" Queuing delta {:?} for deletion", src.strip_prefix(&deltas_dir).unwrap())); + job_log_and_info( + self.job_id, + conn, + &format!( + " Queuing delta {:?} for deletion", + src.strip_prefix(&deltas_dir).unwrap() + ), + ); if dst.exists() { fs::remove_dir_all(&dst)?; @@ -815,8 +905,7 @@ impl UpdateRepoJobInstance { } /* Delete all temporary deltas older than one hour */ - let to_delete = - WalkDir::new(&tmp_deltas_dir) + let to_delete = WalkDir::new(&tmp_deltas_dir) .min_depth(2) .max_depth(2) .into_iter() @@ -825,7 +914,7 @@ impl UpdateRepoJobInstance { if let Ok(metadata) = e.metadata() { if let Ok(mtime) = metadata.modified() { if let Ok(since) = now.duration_since(mtime) { - return since.as_secs() > 60 * 60 + return since.as_secs() > 60 * 60; } } }; @@ -835,79 +924,76 @@ impl UpdateRepoJobInstance { .collect::>(); for dir in to_delete { - job_log_and_info(self.job_id, conn, - &format!(" Deleting old delta {:?}", dir.strip_prefix(&tmp_deltas_dir).unwrap())); + job_log_and_info( + self.job_id, + conn, + &format!( + " Deleting old delta {:?}", + dir.strip_prefix(&tmp_deltas_dir).unwrap() + ), + ); fs::remove_dir_all(&dir)?; } Ok(()) } - fn update_appstream (&self, - config: &Config, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn update_appstream( + &self, + config: &Config, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult<()> { job_log_and_info(self.job_id, conn, "Regenerating appstream branches"); let repo_path = repoconfig.get_abs_repo_path(); let mut cmd = Command::new("flatpak"); - cmd - .arg("build-update-repo") - 
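// Illustrative sketch, not part of the patch: how calculate_deltas decides what to do —
// deltas that are wanted but absent get generated, deltas that exist but are no longer
// wanted get retired. The delta names below are placeholder strings, not real ostree deltas.
use std::collections::HashSet;

fn main() {
    let wanted: HashSet<String> = ["abc123-def456", "def456-789abc"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let existing: HashSet<String> = ["abc123-def456", "000111-222333"]
        .iter()
        .map(|s| s.to_string())
        .collect();

    let missing: HashSet<String> = wanted.difference(&existing).cloned().collect();
    let unwanted: HashSet<String> = existing.difference(&wanted).cloned().collect();

    println!("generate: {:?}", missing); // {"def456-789abc"}
    println!("retire:   {:?}", unwanted); // {"000111-222333"}
}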
.arg("--no-update-summary"); + cmd.arg("build-update-repo").arg("--no-update-summary"); add_gpg_args(&mut cmd, &repoconfig.gpg_key, &config.gpg_homedir); - cmd - .arg(&repo_path); + cmd.arg(&repo_path); do_command(cmd)?; Ok(()) } - fn update_summary (&self, - config: &Config, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn update_summary( + &self, + config: &Config, + repoconfig: &RepoConfig, + conn: &PgConnection, + ) -> JobResult<()> { job_log_and_info(self.job_id, conn, "Updating summary"); let repo_path = repoconfig.get_abs_repo_path(); let mut cmd = Command::new("flatpak"); - cmd - .arg("build-update-repo") - .arg("--no-update-appstream"); + cmd.arg("build-update-repo").arg("--no-update-appstream"); add_gpg_args(&mut cmd, &repoconfig.gpg_key, &config.gpg_homedir); - cmd - .arg(&repo_path); + cmd.arg(&repo_path); do_command(cmd)?; Ok(()) } - fn run_post_publish (&self, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn run_post_publish(&self, repoconfig: &RepoConfig, conn: &PgConnection) -> JobResult<()> { if let Some(post_publish_script) = &repoconfig.post_publish_script { let repo_path = repoconfig.get_abs_repo_path(); let mut cmd = Command::new(post_publish_script); - cmd - .arg(&repoconfig.name) - .arg(&repo_path); + cmd.arg(&repoconfig.name).arg(&repo_path); job_log_and_info(self.job_id, conn, "Running post-publish script"); do_command(cmd)?; }; Ok(()) } - fn extract_appstream (&self, - repoconfig: &RepoConfig, - conn: &PgConnection) -> JobResult<()> { + fn extract_appstream(&self, repoconfig: &RepoConfig, conn: &PgConnection) -> JobResult<()> { job_log_and_info(self.job_id, conn, "Extracting appstream branches"); let repo_path = repoconfig.get_abs_repo_path(); let appstream_dir = repo_path.join("appstream"); - let appstream_refs = ostree::list_refs (&repoconfig.path, "appstream"); + let appstream_refs = ostree::list_refs(&repoconfig.path, "appstream"); for appstream_ref in appstream_refs { - let arch = appstream_ref.split("/").nth(1).unwrap(); + let arch = appstream_ref.split('/').nth(1).unwrap(); let mut cmd = Command::new("ostree"); - cmd - .arg(&format!("--repo={}", &repoconfig.path.to_str().unwrap())) + cmd.arg(&format!("--repo={}", &repoconfig.path.to_str().unwrap())) .arg("checkout") .arg("--user-mode") .arg("--union") @@ -915,29 +1001,35 @@ impl UpdateRepoJobInstance { .arg(&appstream_ref) .arg(appstream_dir.join(arch)); do_command(cmd)?; - }; + } Ok(()) } } - impl JobInstance for UpdateRepoJobInstance { - fn get_job_id (&self) -> i32 { + fn get_job_id(&self) -> i32 { self.job_id } - fn order (&self) -> i32 { + fn order(&self) -> i32 { 2 /* Delay updates after publish so they can be chunked. 
*/ } - fn handle_job (&mut self, executor: &JobExecutor, conn: &PgConnection) -> JobResult { - info!("#{}: Handling Job UpdateRepo: repo: {}", - &self.job_id, &self.repo); + fn handle_job( + &mut self, + executor: &JobExecutor, + conn: &PgConnection, + ) -> JobResult { + info!( + "#{}: Handling Job UpdateRepo: repo: {}", + &self.job_id, &self.repo + ); // Get repo config let config = &executor.config; - let repoconfig = config.get_repoconfig(&self.repo) - .or_else(|_e| Err(JobError::new(&format!("Can't find repo {}", &self.repo))))?; + let repoconfig = config + .get_repoconfig(&self.repo) + .map_err(|_e| JobError::new(&format!("Can't find repo {}", &self.repo)))?; self.update_appstream(config, repoconfig, conn)?; @@ -951,11 +1043,14 @@ impl JobInstance for UpdateRepoJobInstance { self.run_post_publish(repoconfig, conn)?; - Ok(json!({ })) + Ok(json!({})) } } -fn pick_next_job (executor: &mut JobExecutor, conn: &PgConnection) -> Result, DieselError> { +fn pick_next_job( + executor: &mut JobExecutor, + conn: &PgConnection, +) -> Result, DieselError> { use diesel::dsl::exists; use diesel::dsl::not; use diesel::dsl::now; @@ -963,53 +1058,50 @@ fn pick_next_job (executor: &mut JobExecutor, conn: &PgConnection) -> Result> = match for_repo { - None => { - jobs::table - .order(jobs::id) - .filter(ready_job_filter.and(jobs::repo.is_null())) - .get_results::(conn)? - .into_iter() - .map(|job| new_job_instance(executor, job)) - .collect() - }, - Some(repo) => { - jobs::table - .order(jobs::id) - .filter(ready_job_filter.and(jobs::repo.eq(repo))) - .get_results::(conn)? - .into_iter() - .map(|job| new_job_instance(executor, job)) - .collect() - }, + .and(not(exists( + job_dependencies_with_status::table.filter( + job_dependencies_with_status::job_id.eq(jobs::id).and( + job_dependencies_with_status::dependant_status + .le(JobStatus::Started as i16), + ), + ), + ))); + + let mut new_instances: Vec> = match for_repo { + None => jobs::table + .order(jobs::id) + .filter(ready_job_filter.and(jobs::repo.is_null())) + .get_results::(conn)? + .into_iter() + .map(|job| new_job_instance(executor, job)) + .collect(), + Some(repo) => jobs::table + .order(jobs::id) + .filter(ready_job_filter.and(jobs::repo.eq(repo))) + .get_results::(conn)? 
+ .into_iter() + .map(|job| new_job_instance(executor, job)) + .collect(), }; /* Sort by prio */ - new_instances.sort_by(|a, b| a.order().cmp(&b.order())); + new_instances.sort_by_key(|a| a.order()); /* Handle the first, if any */ - for new_instance in new_instances { + if let Some(new_instance) = new_instances.into_iter().next() { diesel::update(jobs::table) .filter(jobs::id.eq(new_instance.get_job_id())) .set((jobs::status.eq(JobStatus::Started as i16),)) .execute(conn)?; - return Ok(new_instance) + return Ok(new_instance); } Err(diesel::NotFound) @@ -1017,48 +1109,49 @@ fn pick_next_job (executor: &mut JobExecutor, conn: &PgConnection) -> Result pick_next_job (executor, conn), - _ => transaction_result + Err(DieselError::DatabaseError(SerializationFailure, _)) => pick_next_job(executor, conn), + _ => transaction_result, } } - -fn process_one_job (executor: &mut JobExecutor, conn: &PgConnection) -> bool { +fn process_one_job(executor: &mut JobExecutor, conn: &PgConnection) -> bool { let new_instance = pick_next_job(executor, conn); match new_instance { Ok(mut instance) => { - let (new_status, new_results) = - match instance.handle_job(executor, conn) { - Ok(json) => { - info!("#{}: Job succeeded", instance.get_job_id()); - (JobStatus::Ended, json.to_string()) - }, - Err(e) => { - job_log_and_error(instance.get_job_id(), conn, - &format!("Job failed: {}", e.to_string())); - (JobStatus::Broken, json!({"error-message": e.to_string()}).to_string()) - } - }; + let (new_status, new_results) = match instance.handle_job(executor, conn) { + Ok(json) => { + info!("#{}: Job succeeded", instance.get_job_id()); + (JobStatus::Ended, json.to_string()) + } + Err(e) => { + job_log_and_error(instance.get_job_id(), conn, &format!("Job failed: {}", e)); + ( + JobStatus::Broken, + json!({"error-message": e.to_string()}).to_string(), + ) + } + }; - let update_res = - diesel::update(jobs::table) + let update_res = diesel::update(jobs::table) .filter(jobs::id.eq(instance.get_job_id())) - .set((jobs::status.eq(new_status as i16), - jobs::results.eq(new_results))) + .set(( + jobs::status.eq(new_status as i16), + jobs::results.eq(new_results), + )) .execute(conn); if let Err(e) = update_res { error!("handle_job: Error updating job {}", e); } true /* We handled a job */ - }, + } Err(diesel::NotFound) => { false /* We didn't handle a job */ - }, + } Err(e) => { error!("Unexpected db error processing job: {}", e); false - }, + } } } @@ -1088,11 +1181,10 @@ impl Handler for JobExecutor { fn handle(&mut self, _msg: ProcessOneJob, _ctx: &mut Self::Context) -> Self::Result { let conn = &self.pool.get().map_err(|_e| ())?; - Ok(process_one_job (self, conn)) + Ok(process_one_job(self, conn)) } } - // We have an async JobQueue object that wraps the sync JobExecutor, because // that way we can respond to incomming requests immediately and decide in // what order to handle them. 
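// Illustrative sketch, not part of the patch: the prioritisation in pick_next_job — ready
// jobs are sorted by order() (commit before publish before update-repo) and only the first
// one is marked Started per call. ReadyJob is a simplified stand-in for the JobInstance list.
#[derive(Debug)]
struct ReadyJob {
    id: i32,
    order: i32,
}

fn pick_first(mut ready: Vec<ReadyJob>) -> Option<ReadyJob> {
    ready.sort_by_key(|job| job.order);
    ready.into_iter().next()
}

fn main() {
    let picked = pick_first(vec![
        ReadyJob { id: 17, order: 2 }, // update-repo
        ReadyJob { id: 18, order: 0 }, // commit
        ReadyJob { id: 19, order: 1 }, // publish
    ]);
    println!("{:?}", picked); // Some(ReadyJob { id: 18, order: 0 })
}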
In particular, we want to prioritize stop @@ -1105,7 +1197,7 @@ struct ExecutorInfo { } pub struct JobQueue { - executors: HashMap,RefCell>, + executors: HashMap, RefCell>, running: bool, } @@ -1114,13 +1206,13 @@ impl JobQueue { let mut info = match self.executors.get(repo) { None => { error!("Got process jobs for non existing executor"); - return - }, + return; + } Some(executor_info) => executor_info.borrow_mut(), }; if !self.running { - return + return; } if info.processing_job { info.job_queued = true; @@ -1129,45 +1221,41 @@ impl JobQueue { info.job_queued = false; let repo = repo.clone(); - ctx.spawn( - info.addr - .send (ProcessOneJob()) - .into_actor(self) - .then(|result, queue, ctx| { - let job_queued = { - let mut info = queue.executors.get(&repo).unwrap().borrow_mut(); - info.processing_job = false; - info.job_queued + ctx.spawn(info.addr.send(ProcessOneJob()).into_actor(self).then( + |result, queue, ctx| { + let job_queued = { + let mut info = queue.executors.get(&repo).unwrap().borrow_mut(); + info.processing_job = false; + info.job_queued + }; + + if queue.running { + let processed_job = match result { + Ok(Ok(true)) => true, + Ok(Ok(false)) => false, + res => { + error!("Unexpected ProcessOneJob result {:?}", res); + false + } }; - if queue.running { - let processed_job = match result { - Ok(Ok(true)) => true, - Ok(Ok(false)) => false, - res => { - error!("Unexpected ProcessOneJob result {:?}", res); - false - }, - }; - - // If we ran a job, or a job was queued, kick again - if job_queued || processed_job { - queue.kick(&repo, ctx); - } else { - // We send a ProcessJobs message each time we added something to the - // db, but case something external modifes the db we have a 10 sec - // polling loop here. Ideally this should be using NOTIFY/LISTEN - // postgre, but diesel/pq-sys does not currently support it. - - ctx.run_later(time::Duration::new(10, 0), move |queue, ctx| { - queue.kick(&repo, ctx); - }); - } + // If we ran a job, or a job was queued, kick again + if job_queued || processed_job { + queue.kick(&repo, ctx); + } else { + // We send a ProcessJobs message each time we added something to the + // db, but case something external modifes the db we have a 10 sec + // polling loop here. Ideally this should be using NOTIFY/LISTEN + // postgre, but diesel/pq-sys does not currently support it. 
+ ctx.run_later(time::Duration::new(10, 0), move |queue, ctx| { + queue.kick(&repo, ctx); + }); } - actix::fut::ok(()) - }) - ); + } + actix::fut::ok(()) + }, + )); } } } @@ -1211,27 +1299,31 @@ impl Handler for JobQueue { fn handle(&mut self, _msg: StopJobQueue, _ctx: &mut Self::Context) -> Self::Result { self.running = false; - let executors : Vec> = self.executors.values().map(|info| info.borrow().addr.clone()).collect(); - ActorResponse::async( - futures::stream::iter_ok(executors).into_actor(self) + let executors: Vec> = self + .executors + .values() + .map(|info| info.borrow().addr.clone()) + .collect(); + ActorResponse::r#async( + futures::stream::iter_ok(executors) + .into_actor(self) .map(|executor: Addr, job_queue, _ctx| { executor - .send (StopJobs()) + .send(StopJobs()) .into_actor(job_queue) - .then(|_result, _job_queue, _ctx| { - actix::fut::ok::<_,(),_>(()) - }) + .then(|_result, _job_queue, _ctx| actix::fut::ok::<_, (), _>(())) }) - .finish() + .finish(), ) } } -fn start_executor(repo: &Option, - config: &Arc, - delta_generator: &Addr, - pool: &Pool) -> RefCell -{ +fn start_executor( + repo: &Option, + config: &Arc, + delta_generator: &Addr, + pool: &Pool, +) -> RefCell { let config_copy = config.clone(); let delta_generator_copy = delta_generator.clone(); let pool_copy = pool.clone(); @@ -1241,29 +1333,35 @@ fn start_executor(repo: &Option, repo: repo_clone.clone(), config: config_copy.clone(), delta_generator: delta_generator_copy.clone(), - pool: pool_copy.clone() + pool: pool_copy.clone(), }), processing_job: false, job_queued: false, }) } - -pub fn start_job_executor(config: Arc, - delta_generator: Addr, - pool: Pool) -> Addr { +pub fn start_job_executor( + config: Arc, + delta_generator: Addr, + pool: Pool, +) -> Addr { let mut executors = HashMap::new(); - executors.insert(None, - start_executor(&None, &config, &delta_generator, &pool)); - - for repo in config.repos.keys().cloned() { - executors.insert(Some(repo.clone()), - start_executor(&Some(repo.clone()), &config, &delta_generator, &pool)); + executors.insert( + None, + start_executor(&None, &config, &delta_generator, &pool), + ); + + for repo in config.repos.keys() { + executors.insert( + Some(repo.clone()), + start_executor(&Some(repo.clone()), &config, &delta_generator, &pool), + ); } JobQueue { - executors: executors, + executors, running: true, - }.start() + } + .start() } pub fn cleanup_started_jobs(pool: &Pool) -> Result<(), diesel::result::Error> { @@ -1272,37 +1370,46 @@ pub fn cleanup_started_jobs(pool: &Pool) -> Result<(), diesel::result::Error> { use schema::builds::dsl::*; let (verifying, _) = RepoState::Verifying.to_db(); let (purging, _) = RepoState::Purging.to_db(); - let (failed, failed_reason) = RepoState::Failed("Server was restarted during job".to_string()).to_db(); - let n_updated = - diesel::update(builds) + let (failed, failed_reason) = + RepoState::Failed("Server was restarted during job".to_string()).to_db(); + let n_updated = diesel::update(builds) .filter(repo_state.eq(verifying).or(repo_state.eq(purging))) - .set((repo_state.eq(failed), - repo_state_reason.eq(failed_reason))) + .set((repo_state.eq(failed), repo_state_reason.eq(failed_reason))) .execute(conn)?; if n_updated != 0 { - error!("Marked {} builds as failed due to in progress jobs on startup", n_updated); + error!( + "Marked {} builds as failed due to in progress jobs on startup", + n_updated + ); } let (publishing, _) = PublishedState::Publishing.to_db(); - let (failed_publish, failed_publish_reason) = 
PublishedState::Failed("Server was restarted during publish".to_string()).to_db(); - let n_updated2 = - diesel::update(builds) + let (failed_publish, failed_publish_reason) = + PublishedState::Failed("Server was restarted during publish".to_string()).to_db(); + let n_updated2 = diesel::update(builds) .filter(published_state.eq(publishing)) - .set((published_state.eq(failed_publish), - published_state_reason.eq(failed_publish_reason))) + .set(( + published_state.eq(failed_publish), + published_state_reason.eq(failed_publish_reason), + )) .execute(conn)?; if n_updated2 != 0 { - error!("Marked {} builds as failed to publish due to in progress jobs on startup", n_updated2); + error!( + "Marked {} builds as failed to publish due to in progress jobs on startup", + n_updated2 + ); } }; { use schema::jobs::dsl::*; - let updated = - diesel::update(jobs) + let updated = diesel::update(jobs) .filter(status.eq(JobStatus::Started as i16)) .set((status.eq(JobStatus::Broken as i16),)) .get_results::(conn)?; if !updated.is_empty() { - error!("Marked {} jobs as broken due to being started already at startup", updated.len()); + error!( + "Marked {} jobs as broken due to being started already at startup", + updated.len() + ); /* For any repo that had an update-repo marked broken, queue a new job */ for job in updated.iter() { let mut queue_update_for_repos = HashSet::new(); @@ -1313,7 +1420,7 @@ pub fn cleanup_started_jobs(pool: &Pool) -> Result<(), diesel::result::Error> { } for reponame in queue_update_for_repos { info!("Queueing new update job for repo {:?}", reponame); - let _update_job = queue_update_job (0, conn, &reponame, None); + let _update_job = queue_update_job(0, conn, &reponame, None); } } } diff --git a/src/lib.rs b/src/lib.rs index fb97549..17d3515 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,72 +1,44 @@ #![allow(proc_macro_derive_resolution_fallback)] -extern crate actix; -extern crate actix_net; -extern crate actix_service; -extern crate actix_web; -extern crate actix_web_actors; -extern crate actix_multipart; -extern crate actix_files; -extern crate askama; -extern crate base64; -extern crate byteorder; -extern crate bytes; -extern crate chrono; -#[macro_use] extern crate diesel; -#[macro_use] extern crate diesel_migrations; -extern crate env_logger; -#[macro_use] extern crate failure; -extern crate futures; -extern crate r2d2; -extern crate serde; -#[macro_use] extern crate serde_json; -#[macro_use] extern crate serde_derive; -extern crate tempfile; -extern crate jsonwebtoken as jwt; -#[macro_use] extern crate log; -extern crate libc; -extern crate walkdir; -extern crate hex; -extern crate filetime; -extern crate num_cpus; -extern crate time; -extern crate tokio; -extern crate tokio_process; -extern crate tokio_signal; -extern crate rand; +#[macro_use] +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; mod api; mod app; mod db; +mod delayed; +mod deltas; pub mod errors; +mod jobs; +mod logger; mod models; +pub mod ostree; mod schema; mod tokens; -mod jobs; -pub mod ostree; -mod deltas; -mod delayed; -mod logger; use actix::prelude::*; use actix_web::dev::Server; +use app::Config; +use deltas::{DeltaGenerator, StopDeltaGenerator}; use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, ManageConnection}; +use jobs::{JobQueue, StopJobQueue}; +use log::info; use std::path; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio_signal::unix::Signal; -use app::Config; -use deltas::{DeltaGenerator,StopDeltaGenerator}; -use jobs::{JobQueue, StopJobQueue}; 
-pub use deltas::{RemoteClientMessage,RemoteServerMessage}; -pub use errors::{DeltaGenerationError}; +pub use deltas::{RemoteClientMessage, RemoteServerMessage}; +pub use errors::DeltaGenerationError; type Pool = diesel::r2d2::Pool>; pub fn load_config(path: &path::Path) -> Arc { - let config_data = app::load_config(&path).expect(&format!("Failed to read config file {:?}", &path)); + let config_data = app::load_config(&path) + .unwrap_or_else(|_| panic!("Failed to read config file {:?}", &path)); Arc::new(config_data) } @@ -89,14 +61,21 @@ fn start_delta_generator(config: &Arc) -> Addr { deltas::start_delta_generator(config.clone()) } -fn start_job_queue(config: &Arc, - pool: &Pool, - delta_generator: &Addr) -> Addr { - jobs::cleanup_started_jobs(&pool).expect("Failed to cleanup started jobs"); +fn start_job_queue( + config: &Arc, + pool: &Pool, + delta_generator: &Addr, +) -> Addr { + jobs::cleanup_started_jobs(pool).expect("Failed to cleanup started jobs"); jobs::start_job_executor(config.clone(), delta_generator.clone(), pool.clone()) } -fn handle_signal(sig: i32, server: &Server, job_queue: Addr, delta_generator: Addr) -> impl Future { +fn handle_signal( + sig: i32, + server: &Server, + job_queue: Addr, + delta_generator: Addr, +) -> impl Future { let graceful = match sig { tokio_signal::unix::SIGINT => { info!("SIGINT received, exiting"); @@ -118,31 +97,33 @@ fn handle_signal(sig: i32, server: &Server, job_queue: Addr, delta_gen .stop(graceful) .then(move |_result| { info!("Stopping delta generator"); - delta_generator - .send(StopDeltaGenerator()) + delta_generator.send(StopDeltaGenerator()) }) .then(move |_result| { info!("Stopping job processing"); - job_queue - .send(StopJobQueue()) + job_queue.send(StopJobQueue()) }) - .then( |_| { + .then(|_| { info!("Exiting..."); tokio::timer::Delay::new(Instant::now() + Duration::from_millis(300)) }) - .then( |_| { + .then(|_| { System::current().stop(); Ok(()) }) } -fn handle_signals(server: Server, - job_queue: Addr, - delta_generator: Addr) { +fn handle_signals( + server: Server, + job_queue: Addr, + delta_generator: Addr, +) { let sigint = Signal::new(tokio_signal::unix::SIGINT).flatten_stream(); let sigterm = Signal::new(tokio_signal::unix::SIGTERM).flatten_stream(); let sigquit = Signal::new(tokio_signal::unix::SIGQUIT).flatten_stream(); - let handle_signals = sigint.select(sigterm).select(sigquit) + let handle_signals = sigint + .select(sigterm) + .select(sigquit) .for_each(move |sig| { handle_signal(sig, &server, job_queue.clone(), delta_generator.clone()) }) diff --git a/src/logger.rs b/src/logger.rs index 8faad37..db3e237 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,16 +1,15 @@ //! 
Request logging middleware -use time; use actix_service::{Service, Transform}; -use actix_web::dev::{BodySize, MessageBody, ResponseBody,ServiceRequest, ServiceResponse}; +use actix_web::dev::{BodySize, MessageBody, ResponseBody, ServiceRequest, ServiceResponse}; use actix_web::error::Error; use actix_web::http::StatusCode; -use std::rc::Rc; -use futures::{Async, Future, Poll}; +use bytes::Bytes; use futures::future::{ok, FutureResult}; +use futures::{Async, Future, Poll}; use std::marker::PhantomData; -use bytes::Bytes; +use std::rc::Rc; -use tokens::ClaimsValidator; +use crate::tokens::ClaimsValidator; pub struct Logger(Rc); @@ -27,29 +26,28 @@ struct ResponseData { size: usize, } -pub struct Inner { -} +pub struct Inner {} impl Inner { fn log(&self, req: &RequestData, resp: &ResponseData) { let rt = ((time::now() - req.time).num_nanoseconds().unwrap_or(0) as f64) / 1_000_000_000.0; - info!("{} \"{}\" {} {} {} {} {:.6}", - req.remote_ip, - req.request_line, - resp.token_name, - resp.status.as_u16(), - resp.size, - req.user_agent, - rt); + log::info!( + "{} \"{}\" {} {} {} {} {:.6}", + req.remote_ip, + req.request_line, + resp.token_name, + resp.status.as_u16(), + resp.size, + req.user_agent, + rt + ); } } - impl Logger { pub fn default() -> Logger { - Logger(Rc::new(Inner { - })) + Logger(Rc::new(Inner {})) } } @@ -99,16 +97,15 @@ where let remote_ip = req.connection_info().remote().unwrap_or("-").to_string(); let request_line = if req.query_string().is_empty() { - format!("{} {} {:?}", - req.method(), - req.path(), - req.version()) + format!("{} {} {:?}", req.method(), req.path(), req.version()) } else { - format!("{} {}?{} {:?}", - req.method(), - req.path(), - req.query_string(), - req.version()) + format!( + "{} {}?{} {:?}", + req.method(), + req.path(), + req.query_string(), + req.version() + ) }; let user_agent = if let Some(val) = req.headers().get("User-Agent") { @@ -119,16 +116,17 @@ where } } else { "-" - }.to_string(); + } + .to_string(); LoggerResponse { fut: self.service.call(req), inner: self.inner.clone(), - request_data: Some( RequestData { + request_data: Some(RequestData { time: now, - remote_ip: remote_ip, - request_line: request_line, - user_agent: user_agent, + remote_ip, + request_line, + user_agent, }), _t: PhantomData, } @@ -159,23 +157,22 @@ where if let Some(error) = res.response().error() { if res.response().head().status != StatusCode::INTERNAL_SERVER_ERROR { - debug!("Error in response: {:?}", error); + log::debug!("Error in response: {:?}", error); } } - let token_name = - if let Some(ref claims) = res.request().get_claims() { - if let Some(ref name) = claims.name { - name.clone() - } else { - "-".to_string() - } + let token_name = if let Some(ref claims) = res.request().get_claims() { + if let Some(ref name) = claims.name { + name.clone() + } else { + "-".to_string() + } } else { "-".to_string() }; let response_data = ResponseData { - token_name: token_name, + token_name, status: res.response().head().status, size: 0, }; @@ -185,7 +182,7 @@ where body, inner: self.inner.clone(), request_data: self.request_data.take().unwrap(), - response_data: response_data, + response_data, }) }))) } diff --git a/src/models.rs b/src/models.rs index 783752a..0cae1f1 100644 --- a/src/models.rs +++ b/src/models.rs @@ -1,7 +1,6 @@ -use std::{mem,time}; - -use chrono; -use schema::{ builds, build_refs, jobs, job_dependencies }; +use crate::schema::{build_refs, builds, job_dependencies, jobs}; +use serde::{Deserialize, Serialize}; +use std::{mem, time}; #[derive(Deserialize, 
Insertable, Debug)] #[table_name = "builds"] @@ -27,7 +26,7 @@ pub struct Build { pub extra_ids: Vec, } -#[derive(Deserialize, Debug,PartialEq)] +#[derive(Deserialize, Debug, PartialEq)] pub enum PublishedState { Unpublished, Publishing, @@ -45,7 +44,7 @@ impl PublishedState { PublishedState::Unpublished => (0, None), PublishedState::Publishing => (1, None), PublishedState::Published => (2, None), - PublishedState::Failed(s) => (3, Some(s.to_string())) + PublishedState::Failed(s) => (3, Some(s.to_string())), } } @@ -54,7 +53,12 @@ impl PublishedState { 0 => PublishedState::Unpublished, 1 => PublishedState::Publishing, 2 => PublishedState::Published, - 3 => PublishedState::Failed(reason.as_ref().unwrap_or(&"Unknown reason".to_string()).to_string()), + 3 => PublishedState::Failed( + reason + .as_ref() + .unwrap_or(&"Unknown reason".to_string()) + .to_string(), + ), _ => PublishedState::Failed("Unknown state".to_string()), } } @@ -91,7 +95,12 @@ impl RepoState { 0 => RepoState::Uploading, 1 => RepoState::Verifying, 2 => RepoState::Ready, - 3 => RepoState::Failed(reason.as_ref().unwrap_or(&"Unknown reason".to_string()).to_string()), + 3 => RepoState::Failed( + reason + .as_ref() + .unwrap_or(&"Unknown reason".to_string()) + .to_string(), + ), 4 => RepoState::Purging, 5 => RepoState::Purged, _ => RepoState::Failed("Unknown state".to_string()), @@ -124,12 +133,9 @@ table! { } } -allow_tables_to_appear_in_same_query!( - jobs, - job_dependencies_with_status, -); +allow_tables_to_appear_in_same_query!(jobs, job_dependencies_with_status,); -#[derive(Deserialize, Debug,PartialEq)] +#[derive(Deserialize, Debug, PartialEq)] pub enum JobStatus { New, Started, @@ -149,7 +155,7 @@ impl JobStatus { } } -#[derive(Debug,PartialEq)] +#[derive(Debug, PartialEq)] pub enum JobKind { Commit, Publish, @@ -199,9 +205,11 @@ pub struct Job { impl Job { // Ideally we'd do this via a SUBSTRING query, but at least do it behind the API - pub fn apply_log_offset(mut self: Self, log_offset: Option) -> Self { + pub fn apply_log_offset(mut self, log_offset: Option) -> Self { if let Some(log_offset) = log_offset { - self.log = self.log.split_off(std::cmp::min(log_offset, self.log.len())) + self.log = self + .log + .split_off(std::cmp::min(log_offset, self.log.len())) } self } @@ -226,7 +234,6 @@ pub struct JobDependencyWithStatus { pub dependant_status: i16, } - #[derive(Serialize, Deserialize, Debug)] pub struct CommitJob { pub build: i32, @@ -235,7 +242,6 @@ pub struct CommitJob { pub token_type: Option, } - #[derive(Serialize, Deserialize, Debug)] pub struct PublishJob { pub build: i32, diff --git a/src/ostree.rs b/src/ostree.rs index c1a6e75..add0c11 100644 --- a/src/ostree.rs +++ b/src/ostree.rs @@ -1,20 +1,20 @@ -use base64; -use byteorder::{NativeEndian,LittleEndian, ByteOrder}; +use byteorder::{ByteOrder, LittleEndian, NativeEndian}; +use failure::Fail; +use futures::future; +use futures::future::Either; +use futures::Future; +use serde::{Deserialize, Serialize}; use std::fs; use std::io::Read; use std::num::NonZeroUsize; +use std::os::unix::process::CommandExt as UnixCommandExt; use std::path; -use std::str; -use walkdir::WalkDir; -use hex; +use std::path::PathBuf; use std::process::Command; +use std::str; +use std::{collections::HashMap, path::Path}; use tokio_process::CommandExt; -use std::os::unix::process::CommandExt as UnixCommandExt; -use futures::Future; -use futures::future::Either; -use std::path::{PathBuf}; -use std::collections::HashMap; -use futures::future; +use walkdir::WalkDir; #[derive(Fail, Debug, 
Clone, PartialEq)] pub enum OstreeError { @@ -27,9 +27,9 @@ pub enum OstreeError { #[fail(display = "Invalid utf8 string")] InvalidUtf8, #[fail(display = "Command {} failed to start: {}", _0, _1)] - ExecFailed(String,String), + ExecFailed(String, String), #[fail(display = "Command {} exited unsucessfully with stderr: {}", _0, _1)] - CommandFailed(String,String), + CommandFailed(String, String), #[fail(display = "Internal Error: {}", _0)] InternalError(String), } @@ -38,7 +38,7 @@ pub type OstreeResult = Result; #[derive(Debug)] pub struct OstreeCommit { - pub metadata: HashMap, + pub metadata: HashMap, pub parent: Option, pub subject: String, pub body: String, @@ -49,90 +49,78 @@ pub struct OstreeCommit { #[derive(Debug)] pub struct OstreeDeltaSuperblock { - pub metadata: HashMap, + pub metadata: HashMap, pub commit: OstreeCommit, } fn is_base_type(byte: u8) -> bool { let c = byte as char; - return - c == 'b' || - c == 'y' || - c == 'n' || - c == 'q' || - c == 'i' || - c == 'u' || - c == 'x' || - c == 't' || - c == 's' || - c == 'o' || - c == 'g'; + c == 'b' + || c == 'y' + || c == 'n' + || c == 'q' + || c == 'i' + || c == 'u' + || c == 'x' + || c == 't' + || c == 's' + || c == 'o' + || c == 'g' } fn type_string_element_len(type_string: &str) -> Option { - if type_string.len() == 0 { + if type_string.is_empty() { return None; } let bytes = type_string.as_bytes(); let c = bytes[0]; - if is_base_type(c) || c == 'v' as u8 { - return Some(1) + if is_base_type(c) || c == b'v' { + return Some(1); } match c as char { - 'm' | 'a' => { - if let Some(len) = type_string_element_len(&type_string[1..]) { - return Some(1 + len); - } else { - return None; - } - }, + 'm' | 'a' => type_string_element_len(&type_string[1..]).map(|len| 1 + len), '{' => { if type_string.len() < 3 || !is_base_type(bytes[1]) { return None; } if let Some(len) = type_string_element_len(&type_string[2..]) { - if type_string.len() > 2 + len && bytes[2 + len] != '{' as u8 { - return Some(3 + len); + if type_string.len() > 2 + len && bytes[2 + len] != b'{' { + Some(3 + len) } else { - return None; + None } } else { - return None; + None } - }, + } '(' => { - let mut pos : usize = 1; + let mut pos: usize = 1; loop { if type_string.len() <= pos { return None; } - if bytes[pos] == ')' as u8 { + if bytes[pos] == b')' { return Some(pos + 1); } if let Some(len) = type_string_element_len(&type_string[pos..]) { pos += len; } else { - return None + return None; } } } - _ => { - return None; - } + _ => None, } } -fn type_string_split<'a>(type_string: &'a str) -> Option<(&'a str, &'a str)> { - if let Some(len) = type_string_element_len (type_string) { - return Some((&type_string[0..len], &type_string[len..])); - } - return None; +fn type_string_split(type_string: &str) -> Option<(&str, &str)> { + type_string_element_len(type_string).map(|len| (&type_string[0..len], &type_string[len..])) } #[derive(Debug)] enum VariantSize { Fixed(NonZeroUsize), - Variable + Variable, } #[derive(Debug)] @@ -157,22 +145,25 @@ impl Variant { fn new(type_string: String, data: Vec) -> OstreeResult { match type_string_element_len(&type_string) { None => { - return Err(OstreeError::InternalError(format!("Invalid type string '{}'", type_string))); - }, + return Err(OstreeError::InternalError(format!( + "Invalid type string '{}'", + type_string + ))); + } Some(len) => { if len != type_string.len() { - return Err(OstreeError::InternalError(format!("Leftover text in type string '{}'", type_string))); + return Err(OstreeError::InternalError(format!( + "Leftover text in type 
string '{}'", + type_string + ))); } } }; - Ok(Variant { - type_string: type_string, - data: data, - }) + Ok(Variant { type_string, data }) } - fn root<'a>(&'a self) -> SubVariant<'a> { + fn root(&self) -> SubVariant<'_> { SubVariant { type_string: &self.type_string, data: &self.data, @@ -199,14 +190,14 @@ impl Variant { return self.root().parse_as_i32_le(); } - pub fn as_bytes<'a>(&'a self) -> &'a [u8] { + pub fn as_bytes(&self) -> &[u8] { return self.root().parse_as_bytes(); } } impl<'a> SubVariant<'a> { fn copy(&self) -> Variant { - return Variant { + Variant { type_string: self.type_string.to_string(), data: self.data.to_vec(), } @@ -228,7 +219,10 @@ impl<'a> SubVariant<'a> { } fn read_frame_offset(&self, offset: usize, framing_size: usize) -> OstreeResult { if offset + framing_size > self.data.len() { - return Err(OstreeError::InternalError(format!("Framing error: can't read frame offset at {}", offset))); + return Err(OstreeError::InternalError(format!( + "Framing error: can't read frame offset at {}", + offset + ))); } let data = &self.data[offset..offset + framing_size]; let offset = match framing_size { @@ -239,16 +233,26 @@ impl<'a> SubVariant<'a> { 8 => { let len64 = LittleEndian::read_u64(data); if len64 > ::std::usize::MAX as u64 { - return Err(OstreeError::InternalError("Framing error: To large framing size fror usize".to_string())); + return Err(OstreeError::InternalError( + "Framing error: To large framing size fror usize".to_string(), + )); } len64 as usize - }, - _ => return Err(OstreeError::InternalError(format!("Framing error: Unexpected framing size {}", framing_size))), + } + _ => { + return Err(OstreeError::InternalError(format!( + "Framing error: Unexpected framing size {}", + framing_size + ))) + } }; if offset > self.data.len() { - return Err(OstreeError::InternalError(format!("Framing error: out of bounds offset at {}", offset))); + return Err(OstreeError::InternalError(format!( + "Framing error: out of bounds offset at {}", + offset + ))); }; - return Ok(offset) + Ok(offset) } fn pad(&self, cur: usize, alignment: usize) -> usize { @@ -260,19 +264,29 @@ impl<'a> SubVariant<'a> { } } - fn subset(&self, start: usize, end: usize, type_string: &'a str) -> OstreeResult> { + fn subset( + &self, + start: usize, + end: usize, + type_string: &'a str, + ) -> OstreeResult> { if end < start || end > self.data.len() { - return Err(OstreeError::InternalError(format!("Framing error: subset {}-{} out of bounds for {:?}", start, end, self))); + return Err(OstreeError::InternalError(format!( + "Framing error: subset {}-{} out of bounds for {:?}", + start, end, self + ))); } - Ok( SubVariant { - type_string: type_string, + Ok(SubVariant { + type_string, data: &self.data[start..end], }) } fn checked_sub(&self, a: usize, b: usize) -> OstreeResult { if b > a { - return Err(OstreeError::InternalError("Framing error: negative checked_sub".to_string())); + Err(OstreeError::InternalError( + "Framing error: negative checked_sub".to_string(), + )) } else { Ok(a - b) } @@ -283,7 +297,10 @@ impl<'a> SubVariant<'a> { let t = self.type_string.as_bytes()[0] as char; if t != '(' && t != '{' { - return Err(OstreeError::InternalError(format!("Not a dictionary: {}", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Not a dictionary: {}", + self.type_string + ))); } let mut type_string_rest = &self.type_string[1..]; @@ -291,35 +308,36 @@ impl<'a> SubVariant<'a> { let framing_size = self.framing_size(); let mut frame_offset = self.data.len(); - let mut next : usize = 0; + let 
mut next: usize = 0; for i in 0..fields.len() { let field = &fields[i]; - let field_type = if let Some((t, r)) = type_string_split (type_string_rest) { + let field_type = if let Some((t, r)) = type_string_split(type_string_rest) { type_string_rest = r; t } else { - return Err(OstreeError::InternalError(format!("Invalid type: {}", type_string_rest))); + return Err(OstreeError::InternalError(format!( + "Invalid type: {}", + type_string_rest + ))); }; next = self.pad(next, field.alignment); - let field_size = - match field.size { - VariantSize::Fixed(size) => usize::from(size), - VariantSize::Variable => { - let end = - if i == fields.len() - 1 { - frame_offset - } else { - frame_offset = self.checked_sub(frame_offset, framing_size)?; - self.read_frame_offset(frame_offset, framing_size)? - }; - self.checked_sub(end, next)? - }, - }; - - let sub = self.subset(next, next+field_size, field_type)?; + let field_size = match field.size { + VariantSize::Fixed(size) => usize::from(size), + VariantSize::Variable => { + let end = if i == fields.len() - 1 { + frame_offset + } else { + frame_offset = self.checked_sub(frame_offset, framing_size)?; + self.read_frame_offset(frame_offset, framing_size)? + }; + self.checked_sub(end, next)? + } + }; + + let sub = self.subset(next, next + field_size, field_type)?; result.push(sub); next += field_size; } @@ -327,10 +345,16 @@ impl<'a> SubVariant<'a> { Ok(result) } - fn parse_as_variable_width_array(&self, element_alignment: usize) -> OstreeResult>> { + fn parse_as_variable_width_array( + &self, + element_alignment: usize, + ) -> OstreeResult>> { let t = self.type_string.as_bytes()[0] as char; if t != 'a' { - return Err(OstreeError::InternalError(format!("Not an array: {}", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Not an array: {}", + self.type_string + ))); } let size = self.data.len(); @@ -346,7 +370,7 @@ impl<'a> SubVariant<'a> { let mut result = Vec::with_capacity(length); - let mut last_end : usize = 0; + let mut last_end: usize = 0; for i in 0..length { let start = self.pad(last_end, element_alignment); let end = self.read_frame_offset(frame_offsets + i * framing_size, framing_size)?; @@ -359,41 +383,52 @@ impl<'a> SubVariant<'a> { Ok(result) } - fn parse_as_variant(&self) -> OstreeResult> { + fn parse_as_variant(&self) -> OstreeResult> { if self.type_string != "v" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not a variant", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not a variant", + self.type_string + ))); } - if self.data.len() == 0 { - return Ok (self.subset(0,0, "()")?); + if self.data.is_empty() { + return self.subset(0, 0, "()"); } - let parts : Vec<&'a [u8]> = self.data.rsplitn(2, |&x| x == 0).collect(); + let parts: Vec<&'a [u8]> = self.data.rsplitn(2, |&x| x == 0).collect(); if parts.len() != 2 { - return Err(OstreeError::InternalError(format!("No type string in variant"))); + return Err(OstreeError::InternalError( + "No type string in variant".to_string(), + )); } if let Ok(type_string) = str::from_utf8(parts[0]) { - return self.subset(0, parts[1].len(), type_string); + self.subset(0, parts[1].len(), type_string) } else { - return Err(OstreeError::InvalidUtf8); + Err(OstreeError::InvalidUtf8) } } - fn parse_as_asv_element(&self) -> OstreeResult<(String, SubVariant<'a>)> { + fn parse_as_asv_element(&self) -> OstreeResult<(String, SubVariant<'a>)> { let fields = vec![ // 0 - s - key - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 
}, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 1 - v - value - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 } + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, ]; let kv = self.parse_as_tuple(&fields)?; let key = kv[0].parse_as_string()?; let val = kv.into_iter().nth(1).unwrap(); - return Ok((key, val)); + Ok((key, val)) } - fn parse_as_asv(&self) -> OstreeResult> { + fn parse_as_asv(&self) -> OstreeResult> { let mut res = HashMap::new(); for elt in self.parse_as_variable_width_array(8)? { @@ -401,26 +436,32 @@ impl<'a> SubVariant<'a> { let vv = v.parse_as_variant()?; res.insert(k, vv.copy()); } - return Ok(res); + Ok(res) } fn parse_as_string(&self) -> OstreeResult { if self.type_string != "s" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not a string", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not a string", + self.type_string + ))); } let without_nul = &self.data[0..self.data.len() - 1]; if let Ok(str) = str::from_utf8(without_nul) { Ok(str.to_string()) } else { - return Err(OstreeError::InvalidUtf8); + Err(OstreeError::InvalidUtf8) } } fn parse_as_string_vec(&self) -> OstreeResult> { if self.type_string != "as" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not an array of strings", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not an array of strings", + self.type_string + ))); } - let array = self.parse_as_variable_width_array(0)?; + let array = self.parse_as_variable_width_array(0)?; return array.iter().map(|v| v.parse_as_string()).collect(); } @@ -430,24 +471,36 @@ impl<'a> SubVariant<'a> { fn parse_as_u64(&self) -> OstreeResult { if self.type_string != "t" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not a u64", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not a u64", + self.type_string + ))); } if self.data.len() != 8 { - return Err(OstreeError::InternalError(format!("Wrong length {} for u64", self.data.len()))); + return Err(OstreeError::InternalError(format!( + "Wrong length {} for u64", + self.data.len() + ))); } Ok(NativeEndian::read_u64(self.data)) } fn parse_as_i32(&self) -> OstreeResult { if self.type_string != "i" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not a i32", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not a i32", + self.type_string + ))); } Ok(NativeEndian::read_i32(self.data)) } fn parse_as_i32_le(&self) -> OstreeResult { if self.type_string != "i" { - return Err(OstreeError::InternalError(format!("Variant type '{}' not a i32", self.type_string))); + return Err(OstreeError::InternalError(format!( + "Variant type '{}' not a i32", + self.type_string + ))); } Ok(LittleEndian::read_i32(self.data)) } @@ -458,78 +511,103 @@ fn bytes_to_object(bytes: &[u8]) -> String { } fn object_to_bytes(object: &str) -> OstreeResult> { - hex::decode(object).map_err(|e| OstreeError::InternalError(format!("Invalid object '{}: {}'", object, e))) + hex::decode(object) + .map_err(|e| OstreeError::InternalError(format!("Invalid object '{}: {}'", object, e))) } fn maybe_bytes_to_object(bytes: &[u8]) -> Option { - if bytes.len() == 0 { + if bytes.is_empty() { None } else { Some(bytes_to_object(bytes)) } } -fn get_ref_path(repo_path: &path::PathBuf) -> path::PathBuf { - let mut ref_dir = std::env::current_dir().unwrap_or_else(|_e| 
path::PathBuf::new()); +fn get_ref_path(repo_path: &path::Path) -> path::PathBuf { + let mut ref_dir = std::env::current_dir().unwrap_or_default(); ref_dir.push(repo_path); ref_dir.push("refs/heads"); ref_dir } -fn get_deltas_path(repo_path: &path::PathBuf) -> path::PathBuf { - let mut ref_dir = std::env::current_dir().unwrap_or_else(|_e| path::PathBuf::new()); +fn get_deltas_path(repo_path: &path::Path) -> path::PathBuf { + let mut ref_dir = std::env::current_dir().unwrap_or_default(); ref_dir.push(repo_path); ref_dir.push("deltas"); ref_dir } -fn get_tmp_deltas_path(repo_path: &path::PathBuf) -> path::PathBuf { - let mut ref_dir = std::env::current_dir().unwrap_or_else(|_e| path::PathBuf::new()); +fn get_tmp_deltas_path(repo_path: &path::Path) -> path::PathBuf { + let mut ref_dir = std::env::current_dir().unwrap_or_default(); ref_dir.push(repo_path); ref_dir.push("tmp/deltas"); ref_dir } -fn get_object_path(repo_path: &path::PathBuf, object: &str, object_type: &str) -> path::PathBuf { - let mut path = std::env::current_dir().unwrap_or_else(|_e| path::PathBuf::new()); +fn get_object_path(repo_path: &path::Path, object: &str, object_type: &str) -> path::PathBuf { + let mut path = std::env::current_dir().unwrap_or_default(); path.push(repo_path); path.push("objects"); - path.push(object[0..2].to_string()); + path.push(&object[0..2]); path.push(format!("{}.{}", &object[2..], object_type)); path } -fn parse_commit (variant: &SubVariant) ->OstreeResult { +fn parse_commit(variant: &SubVariant) -> OstreeResult { let ostree_commit_fields = vec![ // 0 - a{sv} - Metadata - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, // 1 - ay - parent checksum (empty string for initial) - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 2- a(say) - Related objects - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 3 - s - subject - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 4- s - body - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 5- t - Timestamp in seconds since the epoch (UTC, big-endian) - VariantFieldInfo { size: VariantSize::Fixed(std::num::NonZeroUsize::new(8).unwrap()), alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Fixed(std::num::NonZeroUsize::new(8).unwrap()), + alignment: 8, + }, // 6- ay - Root tree contents - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 7- ay - Root tree metadata - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, ]; let commit = variant.parse_as_tuple(&ostree_commit_fields)?; let metadata = commit[0].parse_as_asv()?; - return Ok(OstreeCommit { - metadata: metadata, - parent: maybe_bytes_to_object (commit[1].parse_as_bytes()), + Ok(OstreeCommit { + metadata, + parent: maybe_bytes_to_object(commit[1].parse_as_bytes()), subject: commit[3].parse_as_string()?, body: commit[4].parse_as_string()?, timestamp: u64::from_be(commit[5].parse_as_u64()?), - root_tree: bytes_to_object (commit[6].parse_as_bytes()), - root_metadata: bytes_to_object 
(commit[7].parse_as_bytes()), + root_tree: bytes_to_object(commit[6].parse_as_bytes()), + root_metadata: bytes_to_object(commit[7].parse_as_bytes()), }) } @@ -538,129 +616,165 @@ fn parse_commit (variant: &SubVariant) ->OstreeResult { * of the ID, which we don't want to miss. * Also, this is mainly used for debug/errors, so converts to string. */ -fn get_dir_and_basename (path: &path::PathBuf) -> String { +fn get_dir_and_basename(path: &path::Path) -> String { let mut res = String::new(); - if let Some(parent_name) = - path.parent() + if let Some(parent_name) = path + .parent() .and_then(|parent| parent.file_name()) .and_then(|file_name| file_name.to_str()) { res.push_str(parent_name); - res.push_str("/"); + res.push('/'); } res.push_str(path.file_name().and_then(|s| s.to_str()).unwrap_or("?")); res } -pub fn load_commit_file (path: &path::PathBuf) ->OstreeResult { - let mut fp = fs::File::open(path) - .map_err(|_e| OstreeError::NoSuchCommit(get_dir_and_basename(path)))?; +pub fn load_commit_file(path: &path::Path) -> OstreeResult { + let mut fp = + fs::File::open(path).map_err(|_e| OstreeError::NoSuchCommit(get_dir_and_basename(path)))?; let mut contents = vec![]; - fp.read_to_end(&mut contents) - .map_err(|_e| OstreeError::InternalError(format!("Invalid commit {}", get_dir_and_basename(path))))?; + fp.read_to_end(&mut contents).map_err(|_e| { + OstreeError::InternalError(format!("Invalid commit {}", get_dir_and_basename(path))) + })?; let variant = Variant::new("(a{sv}aya(say)sstayay)".to_string(), contents)?; - return parse_commit (&variant.root()); + return parse_commit(&variant.root()); } -pub fn get_commit (repo_path: &path::PathBuf, commit: &String) ->OstreeResult { +pub fn get_commit(repo_path: &path::Path, commit: &str) -> OstreeResult { let path = get_object_path(repo_path, commit, "commit"); - return load_commit_file(&path); + load_commit_file(&path) } -pub fn load_delta_superblock_file (path: &path::PathBuf) ->OstreeResult { - let mut fp = fs::File::open(path) - .map_err(|_e| OstreeError::NoSuchObject(get_dir_and_basename(path)))?; +pub fn load_delta_superblock_file(path: &path::Path) -> OstreeResult { + let mut fp = + fs::File::open(path).map_err(|_e| OstreeError::NoSuchObject(get_dir_and_basename(path)))?; let mut contents = vec![]; - fp.read_to_end(&mut contents) - .map_err(|_e| OstreeError::InternalError(format!("Invalid delta superblock {}", get_dir_and_basename(path))))?; + fp.read_to_end(&mut contents).map_err(|_e| { + OstreeError::InternalError(format!( + "Invalid delta superblock {}", + get_dir_and_basename(path) + )) + })?; let ostree_superblock_fields = vec![ // 0 - "a{sv}", - Metadata - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, // 1 - "t", - timestamp - VariantFieldInfo { size: VariantSize::Fixed(std::num::NonZeroUsize::new(8).unwrap()), alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Fixed(std::num::NonZeroUsize::new(8).unwrap()), + alignment: 8, + }, // 2 - "ay" - from checksum - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 3 - "ay" - to checksum - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 4 - "(a{sv}aya(say)sstayay)" - commit object - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, 
// 5 - "ay" -Prerequisite deltas - VariantFieldInfo { size: VariantSize::Variable, alignment: 0 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 0, + }, // 6 - "a(uayttay)" -Delta objects - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, // 7 - "a(yaytt)" - Fallback objects - VariantFieldInfo { size: VariantSize::Variable, alignment: 8 }, + VariantFieldInfo { + size: VariantSize::Variable, + alignment: 8, + }, ]; - - let variant = Variant::new("(a{sv}tayay(a{sv}aya(say)sstayay)aya(uayttay)a(yaytt))".to_string(), contents)?; + let variant = Variant::new( + "(a{sv}tayay(a{sv}aya(say)sstayay)aya(uayttay)a(yaytt))".to_string(), + contents, + )?; let container = variant.root(); let superblock = container.parse_as_tuple(&ostree_superblock_fields)?; let metadata = superblock[0].parse_as_asv()?; let commit = parse_commit(&superblock[4])?; - Ok(OstreeDeltaSuperblock { - metadata: metadata, - commit: commit, - }) + Ok(OstreeDeltaSuperblock { metadata, commit }) } -pub fn get_delta_superblock (repo_path: &path::PathBuf, delta: &String) ->OstreeResult { +pub fn get_delta_superblock( + repo_path: &path::Path, + delta: &str, +) -> OstreeResult { let mut path = get_deltas_path(repo_path); - path.push(delta[0..2].to_string()); - path.push(delta[2..].to_string()); - path.push("superblock".to_string()); + path.push(&delta[0..2]); + path.push(&delta[2..]); + path.push("superblock"); - return load_delta_superblock_file(&path); + load_delta_superblock_file(&path) } -pub fn parse_ref (repo_path: &path::PathBuf, ref_name: &str) ->OstreeResult { +pub fn parse_ref(repo_path: &path::Path, ref_name: &str) -> OstreeResult { let mut ref_dir = get_ref_path(repo_path); ref_dir.push(ref_name); - let commit = - fs::read_to_string(ref_dir) + let commit = fs::read_to_string(ref_dir) .map_err(|_e| OstreeError::NoSuchRef(ref_name.to_string()))? 
- .trim_end().to_string(); + .trim_end() + .to_string(); Ok(commit) } -pub fn list_refs (repo_path: &path::PathBuf, prefix: &str) -> Vec { +pub fn list_refs(repo_path: &path::Path, prefix: &str) -> Vec { let mut ref_dir = get_ref_path(repo_path); let path_prefix = &ref_dir.clone(); ref_dir.push(prefix); - return - WalkDir::new(&ref_dir) + WalkDir::new(&ref_dir) .into_iter() .filter_map(|e| e.ok()) .filter(|e| !e.file_type().is_dir()) - .filter_map(|e| e.path().strip_prefix(path_prefix).map(|p| p.to_path_buf()).ok()) + .filter_map(|e| { + e.path() + .strip_prefix(path_prefix) + .map(|p| p.to_path_buf()) + .ok() + }) .filter_map(|p| p.to_str().map(|s| s.to_string())) - .collect(); + .collect() } -#[derive(Serialize, Deserialize,Debug,PartialEq,Eq,Hash,Clone)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)] pub struct Delta { pub from: Option, pub to: String, } fn delta_part_to_hex(part: &str) -> OstreeResult { - let bytes = base64::decode(&part.replace("_", "/")) - .map_err(|err| OstreeError::InternalError(format!("Invalid delta part name '{}': {}", part, err)))?; + let bytes = base64::decode(&part.replace('_', "/")).map_err(|err| { + OstreeError::InternalError(format!("Invalid delta part name '{}': {}", part, err)) + })?; Ok(bytes_to_object(&bytes)) } fn hex_to_delta_part(hex: &str) -> OstreeResult { let bytes = object_to_bytes(hex)?; let part = base64::encode_config(&bytes, base64::STANDARD_NO_PAD); - Ok(part.replace("/", "_")) + Ok(part.replace('/', "_")) } impl Delta { @@ -671,7 +785,7 @@ impl Delta { } } pub fn from_name(name: &str) -> OstreeResult { - let parts: Vec<&str> = name.split("-").collect(); + let parts: Vec<&str> = name.split('-').collect(); if parts.len() == 1 { Ok(Delta { from: None, @@ -689,36 +803,40 @@ impl Delta { let mut name = String::new(); if let Some(ref from) = self.from { - name.push_str(&hex_to_delta_part(&from)?); - name.push_str("-"); + name.push_str(&hex_to_delta_part(from)?); + name.push('-'); } name.push_str(&hex_to_delta_part(&self.to)?); Ok(name) } - pub fn delta_path(&self, repo_path: &path::PathBuf) -> OstreeResult { + pub fn delta_path(&self, repo_path: &path::Path) -> OstreeResult { let mut path = get_deltas_path(repo_path); let name = self.to_name()?; - path.push(name[0..2].to_string()); - path.push(name[2..].to_string()); + path.push(&name[0..2]); + path.push(&name[2..]); Ok(path) } - pub fn tmp_delta_path(&self, repo_path: &path::PathBuf) -> OstreeResult { + pub fn tmp_delta_path(&self, repo_path: &path::Path) -> OstreeResult { let mut path = get_tmp_deltas_path(repo_path); let name = self.to_name()?; - path.push(name[0..2].to_string()); - path.push(name[2..].to_string()); + path.push(&name[0..2]); + path.push(&name[2..]); Ok(path) } +} - pub fn to_string(&self) -> String { - format!("{}-{}", - self.from.as_ref().unwrap_or(&"nothing".to_string()), - self.to) +impl std::fmt::Display for Delta { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + "{}-{}", + self.from.as_ref().unwrap_or(&"nothing".to_string()), + self.to + ) } } - #[cfg(test)] mod tests { // Note this useful idiom: importing names from outer (for mod tests) scope. 
@@ -742,35 +860,52 @@ mod tests { #[test] fn test_delta_name() { - assert_eq!(delta_part_to_hex("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c"), - Ok("3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97".to_string())); - assert_eq!(hex_to_delta_part("3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97"), - Ok("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c".to_string())); - assert_eq!(Delta::from_name("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c"), - Ok(Delta { from: None, to: "3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97".to_string() })); + assert_eq!( + delta_part_to_hex("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c"), + Ok("3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97".to_string()) + ); + assert_eq!( + hex_to_delta_part("3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97"), + Ok("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c".to_string()) + ); + assert_eq!( + Delta::from_name("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c"), + Ok(Delta { + from: None, + to: "3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97".to_string() + }) + ); assert_eq!(Delta::from_name("OkiocD9GLq_Nt660BvWyrH8G62dAvtLv7RPqngWqf5c-3dpOrJG4MNyKHDDGXHpH_zd9NXugnexr5jpvSFQ77S4"), Ok(Delta { from: Some("3a48a8703f462eafcdb7aeb406f5b2ac7f06eb6740bed2efed13ea9e05aa7f97".to_string()), to: "ddda4eac91b830dc8a1c30c65c7a47ff377d357ba09dec6be63a6f48543bed2e".to_string() })); } } -pub fn list_deltas (repo_path: &path::PathBuf) -> Vec { +pub fn list_deltas(repo_path: &path::Path) -> Vec { let deltas_dir = get_deltas_path(repo_path); - return - WalkDir::new(&deltas_dir) + WalkDir::new(&deltas_dir) .min_depth(2) .max_depth(2) .into_iter() .filter_map(|e| e.ok()) .filter(|e| e.file_type().is_dir()) - .map(|e| format!("{}{}", - e.path().parent().unwrap().file_name().unwrap().to_string_lossy(), - e.file_name().to_string_lossy())) + .map(|e| { + format!( + "{}{}", + e.path() + .parent() + .unwrap() + .file_name() + .unwrap() + .to_string_lossy(), + e.file_name().to_string_lossy() + ) + }) .filter_map(|name| Delta::from_name(&name).ok()) - .collect(); + .collect() } -pub fn calc_deltas_for_ref (repo_path: &path::PathBuf, ref_name: &str, depth: u32) -> Vec { +pub fn calc_deltas_for_ref(repo_path: &path::Path, ref_name: &str, depth: u32) -> Vec { let mut res = Vec::new(); let to_commit_res = parse_ref(repo_path, ref_name); @@ -779,10 +914,10 @@ pub fn calc_deltas_for_ref (repo_path: &path::PathBuf, ref_name: &str, depth: u3 } let to_commit = to_commit_res.unwrap(); - let mut from_commit : Option = None; + let mut from_commit: Option = None; for _i in 0..depth { - if let Ok(commitinfo) = get_commit (repo_path, from_commit.as_ref().unwrap_or(&to_commit)) { - res.push(Delta::new(from_commit.as_ref().map(|x| &**x), &to_commit)); + if let Ok(commitinfo) = get_commit(repo_path, from_commit.as_ref().unwrap_or(&to_commit)) { + res.push(Delta::new(from_commit.as_deref(), &to_commit)); from_commit = commitinfo.parent; if from_commit == None { break; @@ -797,135 +932,139 @@ pub fn calc_deltas_for_ref (repo_path: &path::PathBuf, ref_name: &str, depth: u3 fn result_from_output(output: std::process::Output, command: &str) -> Result<(), OstreeError> { if !output.status.success() { - Err(OstreeError::CommandFailed(command.to_string(), String::from_utf8_lossy(&output.stderr).trim().to_string())) + Err(OstreeError::CommandFailed( + command.to_string(), + String::from_utf8_lossy(&output.stderr).trim().to_string(), + )) } else { Ok(()) } } -pub fn 
pull_commit_async(n_retries: i32, - repo_path: PathBuf, - url: String, - commit: String) -> Box> { +pub fn pull_commit_async( + n_retries: i32, + repo_path: PathBuf, + url: String, + commit: String, +) -> Box> { Box::new(future::loop_fn(n_retries, move |count| { let mut cmd = Command::new("ostree"); unsafe { - cmd - .pre_exec (|| { - // Setsid in the child to avoid SIGINT on server killing - // child and breaking the graceful shutdown - libc::setsid(); - Ok(()) - }); + cmd.pre_exec(|| { + // Setsid in the child to avoid SIGINT on server killing + // child and breaking the graceful shutdown + libc::setsid(); + Ok(()) + }); } - cmd - .arg(&format!("--repo={}", &repo_path.to_str().unwrap())) + cmd.arg(&format!("--repo={}", &repo_path.to_str().unwrap())) .arg("pull") .arg(&format!("--url={}", url)) .arg("upstream") .arg(&commit); - info!("Pulling commit {}", commit); + log::info!("Pulling commit {}", commit); let commit_clone = commit.clone(); - cmd - .output_async() + cmd.output_async() .map_err(|e| OstreeError::ExecFailed("ostree pull".to_string(), e.to_string())) - .and_then(|output| { - result_from_output(output, "ostree pull")} - ) - .then(move |r| { - match r { - Ok(res) => Ok(future::Loop::Break(res)), - Err(e) => { - if count > 1 { - warn!("Pull error, retrying commit {}: {}", commit_clone, e.to_string()); - Ok(future::Loop::Continue(count - 1)) - } else { - Err(e) - } + .and_then(|output| result_from_output(output, "ostree pull")) + .then(move |r| match r { + Ok(res) => Ok(future::Loop::Break(res)), + Err(e) => { + if count > 1 { + log::warn!( + "Pull error, retrying commit {}: {}", + commit_clone, + e.to_string() + ); + Ok(future::Loop::Continue(count - 1)) + } else { + Err(e) } } }) })) } -pub fn pull_delta_async(n_retries: i32, - repo_path: &PathBuf, - url: &String, - delta: &Delta) -> Box> { - let url_clone = url.clone(); - let repo_path_clone = repo_path.clone(); +pub fn pull_delta_async( + n_retries: i32, + repo_path: &Path, + url: &str, + delta: &Delta, +) -> Box> { + let url_clone = url.to_string(); + let repo_path_clone = repo_path.to_path_buf(); let to = delta.to.clone(); Box::new( if let Some(ref from) = delta.from { - Either::A(pull_commit_async(n_retries, repo_path.clone(), url.clone(), from.clone())) + Either::A(pull_commit_async( + n_retries, + repo_path.to_path_buf(), + url_clone.clone(), + from.clone(), + )) } else { Either::B(future::result(Ok(()))) } - .and_then(move |_| pull_commit_async(n_retries, repo_path_clone, url_clone, to)) + .and_then(move |_| pull_commit_async(n_retries, repo_path_clone, url_clone, to)), ) } -pub fn generate_delta_async(repo_path: &PathBuf, - delta: &Delta) -> Box> { +pub fn generate_delta_async( + repo_path: &Path, + delta: &Delta, +) -> Box> { let mut cmd = Command::new("flatpak"); unsafe { - cmd - .pre_exec (|| { - // Setsid in the child to avoid SIGINT on server killing - // child and breaking the graceful shutdown - libc::setsid(); - Ok(()) - }); + cmd.pre_exec(|| { + // Setsid in the child to avoid SIGINT on server killing + // child and breaking the graceful shutdown + libc::setsid(); + Ok(()) + }); } - cmd - .arg("build-update-repo") + cmd.arg("build-update-repo") .arg("--generate-static-delta-to") .arg(delta.to.clone()); if let Some(ref from) = delta.from { - cmd - .arg("--generate-static-delta-from") - .arg(from.clone()); + cmd.arg("--generate-static-delta-from").arg(from.clone()); }; - cmd - .arg(&repo_path); + cmd.arg(&repo_path); - info!("Generating delta {}", delta.to_string()); + log::info!("Generating delta {}", 
delta.to_string()); Box::new( - cmd - .output_async() - .map_err(|e| OstreeError::ExecFailed("flatpak build-update-repo".to_string(), e.to_string())) - .and_then(|output| result_from_output(output, "flatpak build-update-repo")) + cmd.output_async() + .map_err(|e| { + OstreeError::ExecFailed("flatpak build-update-repo".to_string(), e.to_string()) + }) + .and_then(|output| result_from_output(output, "flatpak build-update-repo")), ) } -pub fn prune_async(repo_path: &PathBuf) -> Box> { +pub fn prune_async(repo_path: &Path) -> Box> { let mut cmd = Command::new("ostree"); unsafe { - cmd - .pre_exec (|| { - // Setsid in the child to avoid SIGINT on server killing - // child and breaking the graceful shutdown - libc::setsid(); - Ok(()) - }); + cmd.pre_exec(|| { + // Setsid in the child to avoid SIGINT on server killing + // child and breaking the graceful shutdown + libc::setsid(); + Ok(()) + }); } - cmd - .arg("prune") + cmd.arg("prune") .arg(&format!("--repo={}", repo_path.to_string_lossy())) .arg("--keep-younger-than=3 days ago"); Box::new( - cmd - .output_async() + cmd.output_async() .map_err(|e| OstreeError::ExecFailed("ostree prune".to_string(), e.to_string())) - .and_then(|output| result_from_output(output, "ostree prune")) + .and_then(|output| result_from_output(output, "ostree prune")), ) } diff --git a/src/schema.rs b/src/schema.rs index a6e00f5..f17e9c1 100644 --- a/src/schema.rs +++ b/src/schema.rs @@ -54,10 +54,4 @@ table! { joinable!(build_refs -> builds (build_id)); joinable!(published_refs -> builds (build_id)); -allow_tables_to_appear_in_same_query!( - build_refs, - builds, - job_dependencies, - jobs, - published_refs, -); +allow_tables_to_appear_in_same_query!(build_refs, builds, job_dependencies, jobs, published_refs,); diff --git a/src/tokens.rs b/src/tokens.rs index 224287a..d7fcd6a 100644 --- a/src/tokens.rs +++ b/src/tokens.rs @@ -1,20 +1,21 @@ -use actix_web::{HttpRequest, Result, HttpMessage}; -use actix_web::http::header::{HeaderValue, AUTHORIZATION}; use actix_service::{Service, Transform}; use actix_web::dev::{ServiceRequest, ServiceResponse}; use actix_web::error::Error; -use futures::{Future, Poll}; +use actix_web::http::header::{HeaderValue, AUTHORIZATION}; +use actix_web::{HttpMessage, HttpRequest, Result}; use futures::future::{ok, Either, FutureResult}; -use jwt::{decode, Validation, DecodingKey}; +use futures::{Future, Poll}; +use jwt::{decode, DecodingKey, Validation}; use std::rc::Rc; -use app::Claims; -use errors::ApiError; +use crate::app::Claims; +use crate::errors::ApiError; pub trait ClaimsValidator { fn get_claims(&self) -> Option; fn validate_claims(&self, func: Func) -> Result<(), ApiError> - where Func: Fn(&Claims) -> Result<(), ApiError>; + where + Func: Fn(&Claims) -> Result<(), ApiError>; fn has_token_claims(&self, required_sub: &str, required_scope: &str) -> Result<(), ApiError>; fn has_token_prefix(&self, id: &str) -> Result<(), ApiError>; fn has_token_repo(&self, repo: &str) -> Result<(), ApiError>; @@ -24,41 +25,41 @@ pub fn sub_has_prefix(required_sub: &str, claimed_sub: &str) -> bool { // Matches using a path-prefix style comparison: // claimed_sub == "build" should match required_sub == "build" or "build/N[/...]" // claimed_sub == "build/N" should only matchs required_sub == "build/N[/...]" - if required_sub.starts_with(claimed_sub) { - let rest = &required_sub[claimed_sub.len()..]; - if rest.len() == 0 || rest.starts_with("/") { - return true + if let Some(rest) = required_sub.strip_prefix(claimed_sub) { + if rest.is_empty() || 
rest.starts_with('/') { + return true; } }; false } pub fn id_matches_prefix(id: &str, prefix: &str) -> bool { - if prefix == "" { - return true + if prefix.is_empty() { + return true; } - if id.starts_with(prefix) { - let rest = &id[prefix.len()..]; - if rest.len() == 0 || rest.starts_with(".") { - return true + if let Some(rest) = id.strip_prefix(prefix) { + if rest.is_empty() || rest.starts_with('.') { + return true; } }; false } -pub fn id_matches_one_prefix(id: &str, prefixes: &Vec) -> bool { +pub fn id_matches_one_prefix(id: &str, prefixes: &[String]) -> bool { prefixes.iter().any(|prefix| id_matches_prefix(id, prefix)) } pub fn repo_matches_claimed(repo: &str, claimed_repo: &str) -> bool { - if claimed_repo == "" { - return true + if claimed_repo.is_empty() { + return true; } repo == claimed_repo } -pub fn repo_matches_one_claimed(repo: &str, claimed_repos: &Vec) -> bool { - claimed_repos.iter().any(|claimed_repo| repo_matches_claimed(repo, claimed_repo)) +pub fn repo_matches_one_claimed(repo: &str, claimed_repos: &[String]) -> bool { + claimed_repos + .iter() + .any(|claimed_repo| repo_matches_claimed(repo, claimed_repo)) } impl ClaimsValidator for HttpRequest { @@ -67,28 +68,37 @@ impl ClaimsValidator for HttpRequest { } fn validate_claims(&self, func: Func) -> Result<(), ApiError> - where Func: Fn(&Claims) -> Result<(), ApiError> { + where + Func: Fn(&Claims) -> Result<(), ApiError>, + { if let Some(claims) = self.extensions().get::() { func(claims) } else { - Err(ApiError::NotEnoughPermissions("No token specified".to_string())) + Err(ApiError::NotEnoughPermissions( + "No token specified".to_string(), + )) } } fn has_token_claims(&self, required_sub: &str, required_scope: &str) -> Result<(), ApiError> { - self.validate_claims( - |claims| { - // Matches using a path-prefix style comparison: - // claim.sub == "build" should match required_sub == "build" or "build/N[/...]" - // claim.sub == "build/N" should only matchs required_sub == "build/N[/...]" - if !sub_has_prefix(required_sub, &claims.sub) { - return Err(ApiError::NotEnoughPermissions(format!("Not matching sub '{}' in token", required_sub))) - } - if !claims.scope.contains(&required_scope.to_string()) { - return Err(ApiError::NotEnoughPermissions(format!("Not matching scope '{}' in token", required_scope))) - } - Ok(()) - }) + self.validate_claims(|claims| { + // Matches using a path-prefix style comparison: + // claim.sub == "build" should match required_sub == "build" or "build/N[/...]" + // claim.sub == "build/N" should only matchs required_sub == "build/N[/...]" + if !sub_has_prefix(required_sub, &claims.sub) { + return Err(ApiError::NotEnoughPermissions(format!( + "Not matching sub '{}' in token", + required_sub + ))); + } + if !claims.scope.contains(&required_scope.to_string()) { + return Err(ApiError::NotEnoughPermissions(format!( + "Not matching scope '{}' in token", + required_scope + ))); + } + Ok(()) + }) } /* A token prefix is something like org.my.App, and should allow @@ -99,20 +109,24 @@ impl ClaimsValidator for HttpRequest { fn has_token_prefix(&self, id: &str) -> Result<(), ApiError> { self.validate_claims(|claims| { if !id_matches_one_prefix(id, &claims.prefixes) { - return Err(ApiError::NotEnoughPermissions(format!("Id {} not matching prefix in token", id))); + return Err(ApiError::NotEnoughPermissions(format!( + "Id {} not matching prefix in token", + id + ))); } Ok(()) }) } fn has_token_repo(&self, repo: &str) -> Result<(), ApiError> { - self.validate_claims( - |claims| { - if 
!repo_matches_one_claimed(&repo.to_string(), &claims.repos) { - return Err(ApiError::NotEnoughPermissions("Not matching repo in token".to_string())) - } - Ok(()) - }) + self.validate_claims(|claims| { + if !repo_matches_one_claimed(repo, &claims.repos) { + return Err(ApiError::NotEnoughPermissions( + "Not matching repo in token".to_string(), + )); + } + Ok(()) + }) } } @@ -125,16 +139,27 @@ impl Inner { fn parse_authorization(&self, header: &HeaderValue) -> Result { // "Bearer *" length if header.len() < 8 { - return Err(ApiError::InvalidToken("Header length too short".to_string())); + return Err(ApiError::InvalidToken( + "Header length too short".to_string(), + )); } - let mut parts = header.to_str().or(Err(ApiError::InvalidToken("Cannot convert header to string".to_string())))?.splitn(2, ' '); + let mut parts = header + .to_str() + .map_err(|_| ApiError::InvalidToken("Cannot convert header to string".to_string()))? + .splitn(2, ' '); match parts.next() { Some(scheme) if scheme == "Bearer" => (), - _ => return Err(ApiError::InvalidToken("Token scheme is not Bearer".to_string())), + _ => { + return Err(ApiError::InvalidToken( + "Token scheme is not Bearer".to_string(), + )) + } } - let token = parts.next().ok_or(ApiError::InvalidToken("No token value in header".to_string()))?; + let token = parts + .next() + .ok_or_else(|| ApiError::InvalidToken("No token value in header".to_string()))?; Ok(token.to_string()) } @@ -144,7 +169,11 @@ impl Inner { ..Validation::default() }; - let token_data = match decode::(&token, &DecodingKey::from_secret(self.secret.as_ref()), &validation) { + let token_data = match decode::( + &token, + &DecodingKey::from_secret(self.secret.as_ref()), + &validation, + ) { Ok(c) => c, Err(_err) => return Err(ApiError::InvalidToken("Invalid token claims".to_string())), }; @@ -157,10 +186,16 @@ pub struct TokenParser(Rc); impl TokenParser { pub fn new(secret: &[u8]) -> TokenParser { - TokenParser(Rc::new(Inner { secret: secret.to_vec(), optional: false })) + TokenParser(Rc::new(Inner { + secret: secret.to_vec(), + optional: false, + })) } pub fn optional(secret: &[u8]) -> TokenParser { - TokenParser(Rc::new(Inner { secret: secret.to_vec(), optional: true })) + TokenParser(Rc::new(Inner { + secret: secret.to_vec(), + optional: true, + })) } } @@ -179,7 +214,7 @@ where fn new_transform(&self, service: S) -> Self::Future { ok(TokenParserMiddleware { - service: service, + service, inner: self.0.clone(), }) } @@ -199,7 +234,9 @@ impl TokenParserMiddleware { if self.inner.optional { return Ok(None); } - return Err(ApiError::InvalidToken("No Authorization header".to_string())) + return Err(ApiError::InvalidToken( + "No Authorization header".to_string(), + )); } }; let token = self.inner.parse_authorization(header)?; @@ -217,19 +254,21 @@ where type Request = ServiceRequest; type Response = ServiceResponse; type Error = Error; - type Future = Either>, - FutureResult>; + #[allow(clippy::type_complexity)] + type Future = Either< + //S::Future, + Box>, + FutureResult, + >; fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.service.poll_ready() } - fn call(&mut self, req: ServiceRequest) -> Self::Future { let maybe_claims = match self.check_token(&req) { - Err(e) => return Either::B(ok(req.error_response(e))), - Ok(c) => c + Err(e) => return Either::B(ok(req.error_response(e))), + Ok(c) => c, }; let c = maybe_claims.clone(); @@ -238,15 +277,13 @@ where req.extensions_mut().insert(claims); } - Either::A(Box::new(self.service.call(req) - .and_then(move |resp| { - if resp.status() == 
401 || resp.status() == 403 { - if let Some(ref claims) = c { - info!("Presented claims: {:?}", claims); - } - } - Ok(resp) - }) - )) + Either::A(Box::new(self.service.call(req).and_then(move |resp| { + if resp.status() == 401 || resp.status() == 403 { + if let Some(ref claims) = c { + log::info!("Presented claims: {:?}", claims); + } + } + Ok(resp) + }))) } }